From 84adabe9fcca946b9d476cbeb6b90c8354adfbcf Mon Sep 17 00:00:00 2001 From: JoeyHess <> Date: Mon, 12 Nov 2012 15:05:43 +0000 Subject: version 3.20121112 diff --git a/.ghci b/.ghci index 318bac2..c5550ce 100644 --- a/.ghci +++ b/.ghci @@ -1,4 +1 @@ --- make ghci use precompiled modules, and C library -:set -outputdir=tmp -:set -IUtility :load Common diff --git a/.gitignore b/.gitignore index 4dafe01..eb4a997 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ cabal-dev .dir-locals.el # OSX related .DS_Store +.virthualenv diff --git a/Annex.hs b/Annex.hs index a4a56f5..7fb8afd 100644 --- a/Annex.hs +++ b/Annex.hs @@ -5,11 +5,12 @@ - Licensed under the GNU GPL version 3 or higher. -} -{-# LANGUAGE GeneralizedNewtypeDeriving, TypeFamilies, MultiParamTypeClasses #-} +{-# LANGUAGE PackageImports, GeneralizedNewtypeDeriving, TypeFamilies, MultiParamTypeClasses #-} module Annex ( Annex, AnnexState(..), + FileInfo(..), PreferredContentMap, new, newState, @@ -29,7 +30,7 @@ module Annex ( fromRepo, ) where -import Control.Monad.State.Strict +import "mtl" Control.Monad.State.Strict import Control.Monad.Trans.Control (StM, MonadBaseControl, liftBaseWith, restoreM) import Control.Monad.Base (liftBase, MonadBase) import System.Posix.Types (Fd) @@ -72,12 +73,17 @@ instance MonadBaseControl IO Annex where liftBaseWith f = Annex $ liftBaseWith $ \runInIO -> f $ liftM StAnnex . runInIO . runAnnex restoreM = Annex . restoreM . unStAnnex - where - unStAnnex (StAnnex st) = st + where + unStAnnex (StAnnex st) = st type Matcher a = Either [Utility.Matcher.Token a] (Utility.Matcher.Matcher a) -type PreferredContentMap = M.Map UUID (Utility.Matcher.Matcher (S.Set UUID -> FilePath -> Annex Bool)) +data FileInfo = FileInfo + { relFile :: FilePath -- may be relative to cwd + , matchFile :: FilePath -- filepath to match on; may be relative to top + } + +type PreferredContentMap = M.Map UUID (Utility.Matcher.Matcher (S.Set UUID -> FileInfo -> Annex Bool)) -- internal state storage data AnnexState = AnnexState @@ -94,7 +100,8 @@ data AnnexState = AnnexState , checkattrhandle :: Maybe CheckAttrHandle , forcebackend :: Maybe String , forcenumcopies :: Maybe Int - , limit :: Matcher (FilePath -> Annex Bool) + , limit :: Matcher (FileInfo -> Annex Bool) + , uuidmap :: Maybe UUIDMap , preferredcontentmap :: Maybe PreferredContentMap , shared :: Maybe SharedRepository , forcetrust :: TrustMap @@ -123,6 +130,7 @@ newState gitrepo = AnnexState , forcebackend = Nothing , forcenumcopies = Nothing , limit = Left [] + , uuidmap = Nothing , preferredcontentmap = Nothing , shared = Nothing , forcetrust = M.empty diff --git a/Annex/Branch.hs b/Annex/Branch.hs index 3b056ee..243514f 100644 --- a/Annex/Branch.hs +++ b/Annex/Branch.hs @@ -147,7 +147,6 @@ updateTo pairs = do then updateIndex branchref else commitBranch branchref merge_desc (nub $ fullname:refs) - invalidateCache liftIO cleanjournal {- Gets the content of a file, which may be in the journal, or committed @@ -168,20 +167,16 @@ getStale :: FilePath -> Annex String getStale = get' True get' :: Bool -> FilePath -> Annex String -get' staleok file = fromcache =<< getCache file +get' staleok file = fromjournal =<< getJournalFile file where - fromcache (Just content) = return content - fromcache Nothing = fromjournal =<< getJournalFile file - fromjournal (Just content) = cache content + fromjournal (Just content) = return content fromjournal Nothing | staleok = withIndex frombranch | otherwise = do update - withIndex $ frombranch >>= cache - frombranch = L.unpack <$> 
catFile fullname file - cache content = do - setCache file content - return content + frombranch + frombranch = withIndex $ + L.unpack <$> catFile fullname file {- Applies a function to modifiy the content of a file. - @@ -191,11 +186,9 @@ get' staleok file = fromcache =<< getCache file change :: FilePath -> (String -> String) -> Annex () change file a = lockJournal $ a <$> getStale file >>= set file -{- Records new content of a file into the journal and cache. -} +{- Records new content of a file into the journal -} set :: FilePath -> String -> Annex () -set file content = do - setJournalFile file content - setCache file content +set file content = setJournalFile file content {- Stages the journal, and commits staged changes to the branch. -} commit :: String -> Annex () diff --git a/Annex/BranchState.hs b/Annex/BranchState.hs index 2e60d12..9b2f9a0 100644 --- a/Annex/BranchState.hs +++ b/Annex/BranchState.hs @@ -1,6 +1,6 @@ {- git-annex branch state management - - - Runtime state about the git-annex branch, including a small read cache. + - Runtime state about the git-annex branch. - - Copyright 2011-2012 Joey Hess - @@ -22,22 +22,6 @@ setState state = Annex.changeState $ \s -> s { Annex.branchstate = state } changeState :: (BranchState -> BranchState) -> Annex () changeState changer = setState =<< changer <$> getState -setCache :: FilePath -> String -> Annex () -setCache file content = changeState $ \s -> s - { cachedFile = Just file, cachedContent = content} - -getCache :: FilePath -> Annex (Maybe String) -getCache file = from <$> getState - where - from state - | cachedFile state == Just file = - Just $ cachedContent state - | otherwise = Nothing - -invalidateCache :: Annex () -invalidateCache = changeState $ \s -> s - { cachedFile = Nothing, cachedContent = "" } - {- Runs an action to check that the index file exists, if it's not been - checked before in this run of git-annex. -} checkIndexOnce :: Annex () -> Annex () diff --git a/Annex/Journal.hs b/Annex/Journal.hs index 4a56ce3..b6ed792 100644 --- a/Annex/Journal.hs +++ b/Annex/Journal.hs @@ -1,4 +1,4 @@ -{- management of the git-annex journal and cache +{- management of the git-annex journal - - The journal is used to queue up changes before they are committed to the - git-annex branch. Amoung other things, it ensures that if git-annex is diff --git a/Annex/Ssh.hs b/Annex/Ssh.hs index 294270e..2dd73a8 100644 --- a/Annex/Ssh.hs +++ b/Annex/Ssh.hs @@ -99,17 +99,14 @@ sshCleanup = do stopssh socketfile = do let (host, port) = socket2hostport socketfile (_, params) <- sshInfo (host, port) - void $ liftIO $ do - -- "ssh -O stop" is noisy on stderr even with -q - let cmd = unwords $ toCommand $ - [ Params "-O stop" - ] ++ params ++ [Param host] - boolSystem "sh" - [ Param "-c" - , Param $ "ssh " ++ cmd ++ " >/dev/null 2>/dev/null" - ] - -- Cannot remove the lock file; other processes may - -- be waiting on our exclusive lock to use it. + -- "ssh -O stop" is noisy on stderr even with -q + void $ liftIO $ catchMaybeIO $ + withQuietOutput createProcessSuccess $ + proc "ssh" $ toCommand $ + [ Params "-O stop" + ] ++ params ++ [Param host] + -- Cannot remove the lock file; other processes may + -- be waiting on our exclusive lock to use it. 
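
The rewritten stopssh above trades a shell redirection for withQuietOutput/createProcessSuccess, which discard the chatter that "ssh -O stop" produces even with -q. A rough standalone equivalent using only System.Process is sketched below; stopSshConnection is a made-up name, and the -S flag stands in for whatever connection-caching options sshInfo actually computes.

  import System.Process (readProcessWithExitCode)
  import System.Exit (ExitCode)
  import Control.Exception (SomeException, try)
  import Control.Monad (void)

  -- Stop a multiplexed ssh connection without letting "ssh -O stop"
  -- write to the terminal: readProcessWithExitCode captures both
  -- stdout and stderr, and try swallows any failure, much as
  -- catchMaybeIO does in the code above.
  stopSshConnection :: FilePath -> String -> IO ()
  stopSshConnection socketfile host = void quietly
    where
      quietly :: IO (Either SomeException (ExitCode, String, String))
      quietly = try $ readProcessWithExitCode "ssh"
          ["-S", socketfile, "-O", "stop", host] ""

Capturing both streams and then ignoring them keeps the stop request quiet without depending on /bin/sh, which is the point of dropping the boolSystem "sh" call.
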
hostport2socket :: String -> Maybe Integer -> FilePath hostport2socket host Nothing = host diff --git a/Annex/UUID.hs b/Annex/UUID.hs index df77ac2..16c25c0 100644 --- a/Annex/UUID.hs +++ b/Annex/UUID.hs @@ -18,6 +18,7 @@ module Annex.UUID ( prepUUID, genUUID, removeRepoUUID, + storeUUID, ) where import Common.Annex diff --git a/Annex/Wanted.hs b/Annex/Wanted.hs index d7c28ef..1d98cc0 100644 --- a/Annex/Wanted.hs +++ b/Annex/Wanted.hs @@ -9,7 +9,6 @@ module Annex.Wanted where import Common.Annex import Logs.PreferredContent -import Git.FilePath import Annex.UUID import Types.Remote @@ -18,22 +17,17 @@ import qualified Data.Set as S {- Check if a file is preferred content for the local repository. -} wantGet :: AssociatedFile -> Annex Bool wantGet Nothing = return True -wantGet (Just file) = do - fp <- inRepo $ toTopFilePath file - isPreferredContent Nothing S.empty fp +wantGet (Just file) = isPreferredContent Nothing S.empty file {- Check if a file is preferred content for a remote. -} wantSend :: AssociatedFile -> UUID -> Annex Bool wantSend Nothing _ = return True -wantSend (Just file) to = do - fp <- inRepo $ toTopFilePath file - isPreferredContent (Just to) S.empty fp +wantSend (Just file) to = isPreferredContent (Just to) S.empty file {- Check if a file can be dropped, maybe from a remote. - Don't drop files that are preferred content. -} wantDrop :: Maybe UUID -> AssociatedFile -> Annex Bool wantDrop _ Nothing = return True wantDrop from (Just file) = do - fp <- inRepo $ toTopFilePath file u <- maybe getUUID (return . id) from - not <$> isPreferredContent (Just u) (S.singleton u) fp + not <$> isPreferredContent (Just u) (S.singleton u) file diff --git a/Assistant.hs b/Assistant.hs index 8b326c8..5b3dd9c 100644 --- a/Assistant.hs +++ b/Assistant.hs @@ -66,7 +66,12 @@ - Uses the ScanRemotes map.a - Thread 17: PairListener - Listens for incoming pairing traffic, and takes action. - - Thread 18: WebApp + - Thread 18: ConfigMonitor + - Triggered by changes to the git-annex branch, checks for changed + - config files, and reloads configs. + - Thread 19: XMPPClient + - Built-in XMPP client. + - Thread 20: WebApp - Spawns more threads as necessary to handle clients. - Displays the DaemonStatus. - @@ -97,6 +102,13 @@ - ScanRemotes (STM TMVar) - Remotes that have been disconnected, and should be scanned - are indicated by writing to this TMVar. + - BranchChanged (STM SampleVar) + - Changes to the git-annex branch are indicated by updating this + - SampleVar. + - NetMessager (STM TChan, TMVar, SampleVar) + - Used to feed messages to the built-in XMPP client, handle + - pushes, and signal it when it needs to restart due to configuration + - or networking changes. - UrlRenderer (MVar) - A Yesod route rendering function is stored here. 
This allows - things that need to render Yesod routes to block until the webapp @@ -108,14 +120,9 @@ module Assistant where import Assistant.Common -import Assistant.ThreadedMonad import Assistant.DaemonStatus -import Assistant.Changes -import Assistant.Commits -import Assistant.Pushes -import Assistant.ScanRemotes -import Assistant.TransferQueue -import Assistant.TransferSlots +import Assistant.NamedThread +import Assistant.Types.ThreadedMonad import Assistant.Threads.DaemonStatus import Assistant.Threads.Watcher import Assistant.Threads.Committer @@ -128,12 +135,16 @@ import Assistant.Threads.MountWatcher import Assistant.Threads.NetWatcher import Assistant.Threads.TransferScanner import Assistant.Threads.TransferPoller +import Assistant.Threads.ConfigMonitor #ifdef WITH_WEBAPP import Assistant.WebApp import Assistant.Threads.WebApp #ifdef WITH_PAIRING import Assistant.Threads.PairListener #endif +#ifdef WITH_XMPP +import Assistant.Threads.XMPPClient +#endif #else #warning Building without the webapp. You probably need to install Yesod.. #endif @@ -158,51 +169,52 @@ startDaemon assistant foreground webappwaiter logfd <- liftIO . openLog =<< fromRepo gitAnnexLogFile pidfile <- fromRepo gitAnnexPidFile go $ Utility.Daemon.daemonize logfd (Just pidfile) False - where - go d = startAssistant assistant d webappwaiter + where + go d = startAssistant assistant d webappwaiter startAssistant :: Bool -> (IO () -> IO ()) -> Maybe (String -> FilePath -> IO ()) -> Annex () startAssistant assistant daemonize webappwaiter = withThreadState $ \st -> do checkCanWatch dstatus <- startDaemonStatus - liftIO $ daemonize $ run dstatus st - where - run dstatus st = do - changechan <- newChangeChan - commitchan <- newCommitChan - pushmap <- newFailedPushMap - transferqueue <- newTransferQueue - transferslots <- newTransferSlots - scanremotes <- newScanRemoteMap + liftIO $ daemonize $ + flip runAssistant go =<< newAssistantData st dstatus + where + go = do + d <- getAssistant id #ifdef WITH_WEBAPP - urlrenderer <- newUrlRenderer + urlrenderer <- liftIO newUrlRenderer #endif - mapM_ (startthread dstatus) - [ watch $ commitThread st changechan commitchan transferqueue dstatus + mapM_ (startthread d) + [ watch $ commitThread #ifdef WITH_WEBAPP - , assist $ webAppThread (Just st) dstatus scanremotes transferqueue transferslots urlrenderer Nothing webappwaiter + , assist $ webAppThread d urlrenderer False Nothing webappwaiter #ifdef WITH_PAIRING - , assist $ pairListenerThread st dstatus scanremotes urlrenderer + , assist $ pairListenerThread urlrenderer +#endif +#ifdef WITH_XMPP + , assist $ xmppClientThread urlrenderer #endif #endif - , assist $ pushThread st dstatus commitchan pushmap - , assist $ pushRetryThread st dstatus pushmap - , assist $ mergeThread st dstatus transferqueue - , assist $ transferWatcherThread st dstatus transferqueue - , assist $ transferPollerThread st dstatus - , assist $ transfererThread st dstatus transferqueue transferslots - , assist $ daemonStatusThread st dstatus - , assist $ sanityCheckerThread st dstatus transferqueue changechan - , assist $ mountWatcherThread st dstatus scanremotes - , assist $ netWatcherThread st dstatus scanremotes - , assist $ netWatcherFallbackThread st dstatus scanremotes - , assist $ transferScannerThread st dstatus scanremotes transferqueue - , watch $ watchThread st dstatus transferqueue changechan - ] - waitForTermination - watch a = (True, a) - assist a = (False, a) - startthread dstatus (watcher, t) - | watcher || assistant = void $ forkIO $ - 
runNamedThread dstatus t - | otherwise = noop + , assist $ pushThread + , assist $ pushRetryThread + , assist $ mergeThread + , assist $ transferWatcherThread + , assist $ transferPollerThread + , assist $ transfererThread + , assist $ daemonStatusThread + , assist $ sanityCheckerThread + , assist $ mountWatcherThread + , assist $ netWatcherThread + , assist $ netWatcherFallbackThread + , assist $ transferScannerThread + , assist $ configMonitorThread + , watch $ watchThread + ] + liftIO waitForTermination + + watch a = (True, a) + assist a = (False, a) + startthread d (watcher, t) + | watcher || assistant = void $ liftIO $ forkIO $ + runAssistant d $ runNamedThread t + | otherwise = noop diff --git a/Assistant/Alert.hs b/Assistant/Alert.hs index f11ad8f..7e825d8 100644 --- a/Assistant/Alert.hs +++ b/Assistant/Alert.hs @@ -5,7 +5,7 @@ - Licensed under the GNU GPL version 3 or higher. -} -{-# LANGUAGE RankNTypes, OverloadedStrings #-} +{-# LANGUAGE TemplateHaskell, QuasiQuotes, OverloadedStrings #-} module Assistant.Alert where @@ -18,6 +18,7 @@ import qualified Data.Text as T import Data.Text (Text) import qualified Data.Map as M import Data.String +import Yesod {- Different classes of alerts are displayed differently. -} data AlertClass = Success | Message | Activity | Warning | Error @@ -33,6 +34,7 @@ data AlertName | SanityCheckFixAlert | WarningAlert String | PairAlert String + | XMPPNeededAlert deriving (Eq) {- The first alert is the new alert, the second is an old alert. @@ -53,13 +55,18 @@ data Alert = Alert , alertButton :: Maybe AlertButton } -data AlertIcon = ActivityIcon | SuccessIcon | ErrorIcon | InfoIcon +data AlertIcon = ActivityIcon | SuccessIcon | ErrorIcon | InfoIcon | TheCloud -bootstrapIcon :: AlertIcon -> String -bootstrapIcon ActivityIcon = "refresh" -bootstrapIcon InfoIcon = "info-sign" -bootstrapIcon SuccessIcon = "ok" -bootstrapIcon ErrorIcon = "exclamation-sign" +htmlIcon :: AlertIcon -> GWidget sub master () +htmlIcon ActivityIcon = bootStrapIcon "refresh" +htmlIcon InfoIcon = bootStrapIcon "info-sign" +htmlIcon SuccessIcon = bootStrapIcon "ok" +htmlIcon ErrorIcon = bootStrapIcon "exclamation-sign" +-- utf-8 umbrella (utf-8 cloud looks too stormy) +htmlIcon TheCloud = [whamlet|☂|] + +bootStrapIcon :: Text -> GWidget sub master () +bootStrapIcon name = [whamlet||] {- When clicked, a button always redirects to a URL - It may also run an IO action in the background, which is useful @@ -151,11 +158,11 @@ makeAlertFiller success alert , alertButton = Nothing , alertIcon = Just $ if success then SuccessIcon else ErrorIcon } - where - c = alertClass alert - c' - | success = Success - | otherwise = Error + where + c = alertClass alert + c' + | success = Success + | otherwise = Error isFiller :: Alert -> Bool isFiller alert = alertPriority alert == Filler @@ -172,23 +179,23 @@ isFiller alert = alertPriority alert == Filler -} mergeAlert :: AlertId -> Alert -> AlertMap -> AlertMap mergeAlert i al m = maybe updatePrune updateCombine (alertCombiner al) - where - pruneSame k al' = k == i || not (effectivelySameAlert al al') - pruneBloat m' - | bloat > 0 = M.fromList $ pruneold $ M.toList m' - | otherwise = m' - where - bloat = M.size m' - maxAlerts - pruneold l = - let (f, rest) = partition (\(_, a) -> isFiller a) l - in drop bloat f ++ rest - updatePrune = pruneBloat $ M.filterWithKey pruneSame $ - M.insertWith' const i al m - updateCombine combiner = - let combined = M.mapMaybe (combiner al) m - in if M.null combined - then updatePrune - else M.delete i $ M.union combined m 
+ where + pruneSame k al' = k == i || not (effectivelySameAlert al al') + pruneBloat m' + | bloat > 0 = M.fromList $ pruneold $ M.toList m' + | otherwise = m' + where + bloat = M.size m' - maxAlerts + pruneold l = + let (f, rest) = partition (\(_, a) -> isFiller a) l + in drop bloat f ++ rest + updatePrune = pruneBloat $ M.filterWithKey pruneSame $ + M.insertWith' const i al m + updateCombine combiner = + let combined = M.mapMaybe (combiner al) m + in if M.null combined + then updatePrune + else M.delete i $ M.union combined m baseActivityAlert :: Alert baseActivityAlert = Alert @@ -281,10 +288,10 @@ sanityCheckFixAlert msg = Alert , alertCombiner = Just $ dataCombiner (++) , alertButton = Nothing } - where - render dta = tenseWords $ alerthead : dta ++ [alertfoot] - alerthead = "The daily sanity check found and fixed a problem:" - alertfoot = "If these problems persist, consider filing a bug report." + where + render dta = tenseWords $ alerthead : dta ++ [alertfoot] + alerthead = "The daily sanity check found and fixed a problem:" + alertfoot = "If these problems persist, consider filing a bug report." pairingAlert :: AlertButton -> Alert pairingAlert button = baseActivityAlert @@ -294,38 +301,53 @@ pairingAlert button = baseActivityAlert } pairRequestReceivedAlert :: String -> AlertButton -> Alert -pairRequestReceivedAlert repo button = Alert +pairRequestReceivedAlert who button = Alert { alertClass = Message , alertHeader = Nothing , alertMessageRender = tenseWords - , alertData = [UnTensed $ T.pack $ repo ++ " is sending a pair request."] + , alertData = [UnTensed $ T.pack $ who ++ " is sending a pair request."] , alertBlockDisplay = False , alertPriority = High , alertClosable = True , alertIcon = Just InfoIcon - , alertName = Just $ PairAlert repo + , alertName = Just $ PairAlert who , alertCombiner = Just $ dataCombiner $ \_old new -> new , alertButton = Just button } pairRequestAcknowledgedAlert :: String -> Maybe AlertButton -> Alert -pairRequestAcknowledgedAlert repo button = baseActivityAlert - { alertData = ["Pair request with", UnTensed (T.pack repo), Tensed "in progress" "complete"] +pairRequestAcknowledgedAlert who button = baseActivityAlert + { alertData = ["Pairing with", UnTensed (T.pack who), Tensed "in progress" "complete"] , alertPriority = High , alertCombiner = Just $ dataCombiner $ \_old new -> new , alertButton = button } +xmppNeededAlert :: AlertButton -> Alert +xmppNeededAlert button = Alert + { alertHeader = Just "Share with friends, and keep your devices in sync across the cloud." 
+ , alertIcon = Just TheCloud + , alertPriority = High + , alertButton = Just button + , alertClosable = True + , alertClass = Message + , alertMessageRender = tenseWords + , alertBlockDisplay = True + , alertName = Just $ XMPPNeededAlert + , alertCombiner = Just $ dataCombiner $ \_old new -> new + , alertData = [] + } + fileAlert :: TenseChunk -> FilePath -> Alert fileAlert msg file = (activityAlert Nothing [f]) { alertName = Just $ FileAlert msg , alertMessageRender = render , alertCombiner = Just $ dataCombiner combiner } - where - f = fromString $ shortFile $ takeFileName file - render fs = tenseWords $ msg : fs - combiner new old = take 10 $ new ++ old + where + f = fromString $ shortFile $ takeFileName file + render fs = tenseWords $ msg : fs + combiner new old = take 10 $ new ++ old addFileAlert :: FilePath -> Alert addFileAlert = fileAlert (Tensed "Adding" "Added") @@ -350,8 +372,8 @@ shortFile :: FilePath -> String shortFile f | len < maxlen = f | otherwise = take half f ++ ".." ++ drop (len - half) f - where - len = length f - maxlen = 20 - half = (maxlen - 2) `div` 2 + where + len = length f + maxlen = 20 + half = (maxlen - 2) `div` 2 diff --git a/Assistant/BranchChange.hs b/Assistant/BranchChange.hs new file mode 100644 index 0000000..c935454 --- /dev/null +++ b/Assistant/BranchChange.hs @@ -0,0 +1,19 @@ +{- git-annex assistant git-annex branch change tracking + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.BranchChange where + +import Assistant.Common +import Assistant.Types.BranchChange + +import Control.Concurrent.MSampleVar + +branchChanged :: Assistant () +branchChanged = flip writeSV () <<~ (fromBranchChangeHandle . branchChangeHandle) + +waitBranchChange :: Assistant () +waitBranchChange = readSV <<~ (fromBranchChangeHandle . branchChangeHandle) diff --git a/Assistant/Changes.hs b/Assistant/Changes.hs index cccc372..3d39568 100644 --- a/Assistant/Changes.hs +++ b/Assistant/Changes.hs @@ -7,78 +7,33 @@ module Assistant.Changes where -import Common.Annex -import qualified Annex.Queue -import Types.KeySource +import Assistant.Common +import Assistant.Types.Changes import Utility.TSet import Data.Time.Clock -data ChangeType = AddChange | LinkChange | RmChange | RmDirChange - deriving (Show, Eq) - -type ChangeChan = TSet Change - -data Change - = Change - { changeTime :: UTCTime - , changeFile :: FilePath - , changeType :: ChangeType - } - | PendingAddChange - { changeTime ::UTCTime - , changeFile :: FilePath - } - | InProcessAddChange - { changeTime ::UTCTime - , keySource :: KeySource - } - deriving (Show) - -newChangeChan :: IO ChangeChan -newChangeChan = newTSet - {- Handlers call this when they made a change that needs to get committed. -} -madeChange :: FilePath -> ChangeType -> Annex (Maybe Change) -madeChange f t = do - -- Just in case the commit thread is not flushing the queue fast enough. - Annex.Queue.flushWhenFull - liftIO $ Just <$> (Change <$> getCurrentTime <*> pure f <*> pure t) +madeChange :: FilePath -> ChangeType -> Assistant (Maybe Change) +madeChange f t = Just <$> (Change <$> liftIO getCurrentTime <*> pure f <*> pure t) -noChange :: Annex (Maybe Change) +noChange :: Assistant (Maybe Change) noChange = return Nothing {- Indicates an add needs to be done, but has not started yet. 
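
The new Assistant.BranchChange module above boils down to one concurrency primitive behind two verbs: branchChanged signals, waitBranchChange blocks. A minimal sketch of that pattern follows, built on a plain MVar from base instead of the MSampleVar the assistant uses; ChangeSignal and its functions are illustrative names only.

  import Control.Concurrent.MVar
  import Control.Monad (void)

  -- A change-notification handle: signalling never blocks, repeated
  -- signals collapse into one pending wakeup, and a waiter blocks
  -- until at least one signal has arrived. This mimics the behaviour
  -- that branchChanged/waitBranchChange get from their MSampleVar.
  newtype ChangeSignal = ChangeSignal (MVar ())

  newChangeSignal :: IO ChangeSignal
  newChangeSignal = fmap ChangeSignal newEmptyMVar

  signalChange :: ChangeSignal -> IO ()
  signalChange (ChangeSignal v) = void $ tryPutMVar v ()

  waitChange :: ChangeSignal -> IO ()
  waitChange (ChangeSignal v) = takeMVar v

tryPutMVar makes signalling non-blocking and folds repeated signals into a single pending wakeup, which is what makes a sample variable (rather than a queue) the right fit for "the branch changed, look at it when you can".
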
-} -pendingAddChange :: FilePath -> Annex (Maybe Change) -pendingAddChange f = - liftIO $ Just <$> (PendingAddChange <$> getCurrentTime <*> pure f) - -isPendingAddChange :: Change -> Bool -isPendingAddChange (PendingAddChange {}) = True -isPendingAddChange _ = False - -isInProcessAddChange :: Change -> Bool -isInProcessAddChange (InProcessAddChange {}) = True -isInProcessAddChange _ = False - -finishedChange :: Change -> Change -finishedChange c@(InProcessAddChange { keySource = ks }) = Change - { changeTime = changeTime c - , changeFile = keyFilename ks - , changeType = AddChange - } -finishedChange c = c +pendingAddChange :: FilePath -> Assistant (Maybe Change) +pendingAddChange f = Just <$> (PendingAddChange <$> liftIO getCurrentTime <*> pure f) {- Gets all unhandled changes. - Blocks until at least one change is made. -} -getChanges :: ChangeChan -> IO [Change] -getChanges = getTSet +getChanges :: Assistant [Change] +getChanges = getTSet <<~ changeChan {- Puts unhandled changes back into the channel. - Note: Original order is not preserved. -} -refillChanges :: ChangeChan -> [Change] -> IO () -refillChanges = putTSet +refillChanges :: [Change] -> Assistant () +refillChanges cs = flip putTSet cs <<~ changeChan {- Records a change in the channel. -} -recordChange :: ChangeChan -> Change -> IO () -recordChange = putTSet1 +recordChange :: Change -> Assistant () +recordChange c = flip putTSet1 c <<~ changeChan diff --git a/Assistant/Commits.hs b/Assistant/Commits.hs index 86fd759..79555fe 100644 --- a/Assistant/Commits.hs +++ b/Assistant/Commits.hs @@ -7,28 +7,21 @@ module Assistant.Commits where -import Utility.TSet - -import Data.Time.Clock - -type CommitChan = TSet Commit +import Assistant.Common +import Assistant.Types.Commits -data Commit = Commit UTCTime - deriving (Show) - -newCommitChan :: IO CommitChan -newCommitChan = newTSet +import Utility.TSet {- Gets all unhandled commits. - Blocks until at least one commit is made. -} -getCommits :: CommitChan -> IO [Commit] -getCommits = getTSet +getCommits :: Assistant [Commit] +getCommits = getTSet <<~ commitChan {- Puts unhandled commits back into the channel. - Note: Original order is not preserved. -} -refillCommits :: CommitChan -> [Commit] -> IO () -refillCommits = putTSet +refillCommits :: [Commit] -> Assistant () +refillCommits cs = flip putTSet cs <<~ commitChan {- Records a commit in the channel. -} -recordCommit :: CommitChan -> Commit -> IO () -recordCommit = putTSet1 +recordCommit :: Assistant () +recordCommit = flip putTSet1 Commit <<~ commitChan diff --git a/Assistant/Common.hs b/Assistant/Common.hs index d6df77f..0be5362 100644 --- a/Assistant/Common.hs +++ b/Assistant/Common.hs @@ -1,45 +1,13 @@ -{- Common infrastructure for the git-annex assistant threads. +{- Common infrastructure for the git-annex assistant. - - Copyright 2012 Joey Hess - - Licensed under the GNU GPL version 3 or higher. 
-} -module Assistant.Common ( - module X, - ThreadName, - NamedThread(..), - runNamedThread, - debug -) where +module Assistant.Common (module X) where import Common.Annex as X -import Assistant.DaemonStatus -import Assistant.Alert - -import System.Log.Logger -import qualified Control.Exception as E - -type ThreadName = String -data NamedThread = NamedThread ThreadName (IO ()) - -debug :: ThreadName -> [String] -> IO () -debug threadname ws = debugM threadname $ unwords $ (threadname ++ ":") : ws - -runNamedThread :: DaemonStatusHandle -> NamedThread -> IO () -runNamedThread dstatus (NamedThread name a) = go - where - go = do - r <- E.try a :: IO (Either E.SomeException ()) - case r of - Right _ -> noop - Left e -> do - let msg = unwords - [ name - , "crashed:" - , show e - ] - hPutStrLn stderr msg - -- TODO click to restart - void $ addAlert dstatus $ - warningAlert name msg +import Assistant.Monad as X +import Assistant.Types.DaemonStatus as X +import Assistant.Types.NamedThread as X diff --git a/Assistant/DaemonStatus.hs b/Assistant/DaemonStatus.hs index 60b560b..8a4a7a1 100644 --- a/Assistant/DaemonStatus.hs +++ b/Assistant/DaemonStatus.hs @@ -5,19 +5,18 @@ - Licensed under the GNU GPL version 3 or higher. -} -{-# LANGUAGE CPP, RankNTypes, ImpredicativeTypes #-} - module Assistant.DaemonStatus where -import Common.Annex +import Assistant.Common import Assistant.Alert -import Assistant.Pairing import Utility.TempFile +import Assistant.Types.NetMessager import Utility.NotificationBroadcaster import Logs.Transfer import Logs.Trust import qualified Remote import qualified Types.Remote as Remote +import qualified Git import Config import Control.Concurrent.STM @@ -26,83 +25,42 @@ import Data.Time.Clock.POSIX import Data.Time import System.Locale import qualified Data.Map as M - -data DaemonStatus = DaemonStatus - -- False when the daemon is performing its startup scan - { scanComplete :: Bool - -- Time when a previous process of the daemon was running ok - , lastRunning :: Maybe POSIXTime - -- True when the sanity checker is running - , sanityCheckRunning :: Bool - -- Last time the sanity checker ran - , lastSanityCheck :: Maybe POSIXTime - -- Currently running file content transfers - , currentTransfers :: TransferMap - -- Messages to display to the user. - , alertMap :: AlertMap - , lastAlertId :: AlertId - -- Ordered list of remotes to sync with. - , syncRemotes :: [Remote] - -- Pairing request that is in progress. - , pairingInProgress :: Maybe PairingInProgress - -- Broadcasts notifications about all changes to the DaemonStatus - , changeNotifier :: NotificationBroadcaster - -- Broadcasts notifications when queued or current transfers change. - , transferNotifier :: NotificationBroadcaster - -- Broadcasts notifications when there's a change to the alerts - , alertNotifier :: NotificationBroadcaster - } - -type TransferMap = M.Map Transfer TransferInfo - -{- This TMVar is never left empty, so accessing it will never block. -} -type DaemonStatusHandle = TMVar DaemonStatus - -newDaemonStatus :: IO DaemonStatus -newDaemonStatus = DaemonStatus - <$> pure False - <*> pure Nothing - <*> pure False - <*> pure Nothing - <*> pure M.empty - <*> pure M.empty - <*> pure firstAlertId - <*> pure [] - <*> pure Nothing - <*> newNotificationBroadcaster - <*> newNotificationBroadcaster - <*> newNotificationBroadcaster - -getDaemonStatus :: DaemonStatusHandle -> IO DaemonStatus -getDaemonStatus = atomically . 
readTMVar - -modifyDaemonStatus_ :: DaemonStatusHandle -> (DaemonStatus -> DaemonStatus) -> IO () -modifyDaemonStatus_ dstatus a = modifyDaemonStatus dstatus $ \s -> (a s, ()) - -modifyDaemonStatus :: DaemonStatusHandle -> (DaemonStatus -> (DaemonStatus, b)) -> IO b -modifyDaemonStatus dstatus a = do - (s, b) <- atomically $ do - r@(s, _) <- a <$> takeTMVar dstatus - putTMVar dstatus s - return r - sendNotification $ changeNotifier s - return b - -{- Syncable remotes ordered by cost. -} -calcSyncRemotes :: Annex [Remote] +import qualified Data.Text as T + +getDaemonStatus :: Assistant DaemonStatus +getDaemonStatus = (atomically . readTMVar) <<~ daemonStatusHandle + +modifyDaemonStatus_ :: (DaemonStatus -> DaemonStatus) -> Assistant () +modifyDaemonStatus_ a = modifyDaemonStatus $ \s -> (a s, ()) + +modifyDaemonStatus :: (DaemonStatus -> (DaemonStatus, b)) -> Assistant b +modifyDaemonStatus a = do + dstatus <- getAssistant daemonStatusHandle + liftIO $ do + (s, b) <- atomically $ do + r@(s, _) <- a <$> takeTMVar dstatus + putTMVar dstatus s + return r + sendNotification $ changeNotifier s + return b + +{- Returns a function that updates the lists of syncable remotes. -} +calcSyncRemotes :: Annex (DaemonStatus -> DaemonStatus) calcSyncRemotes = do rs <- filterM (repoSyncable . Remote.repo) =<< concat . Remote.byCost <$> Remote.enabledRemoteList - alive <- snd <$> trustPartition DeadTrusted (map Remote.uuid rs) + alive <- trustExclude DeadTrusted (map Remote.uuid rs) let good r = Remote.uuid r `elem` alive - return $ filter good rs + let syncable = filter good rs + return $ \dstatus -> dstatus + { syncRemotes = syncable + , syncGitRemotes = filter (not . Remote.specialRemote) syncable + , syncDataRemotes = filter (not . isXMPPRemote) syncable + } {- Updates the sycRemotes list from the list of all remotes in Annex state. -} -updateSyncRemotes :: DaemonStatusHandle -> Annex () -updateSyncRemotes dstatus = do - remotes <- calcSyncRemotes - liftIO $ modifyDaemonStatus_ dstatus $ - \s -> s { syncRemotes = remotes } +updateSyncRemotes :: Assistant () +updateSyncRemotes = modifyDaemonStatus_ =<< liftAnnex calcSyncRemotes {- Load any previous daemon status file, and store it in a MVar for this - process to use as its DaemonStatus. Also gets current transfer status. 
-} @@ -112,12 +70,11 @@ startDaemonStatus = do status <- liftIO $ flip catchDefaultIO (readDaemonStatusFile file) =<< newDaemonStatus transfers <- M.fromList <$> getTransfers - remotes <- calcSyncRemotes - liftIO $ atomically $ newTMVar status + addsync <- calcSyncRemotes + liftIO $ atomically $ newTMVar $ addsync $ status { scanComplete = False , sanityCheckRunning = False , currentTransfers = transfers - , syncRemotes = remotes } {- Don't just dump out the structure, because it will change over time, @@ -125,34 +82,34 @@ startDaemonStatus = do writeDaemonStatusFile :: FilePath -> DaemonStatus -> IO () writeDaemonStatusFile file status = viaTmp writeFile file =<< serialized <$> getPOSIXTime - where - serialized now = unlines - [ "lastRunning:" ++ show now - , "scanComplete:" ++ show (scanComplete status) - , "sanityCheckRunning:" ++ show (sanityCheckRunning status) - , "lastSanityCheck:" ++ maybe "" show (lastSanityCheck status) - ] + where + serialized now = unlines + [ "lastRunning:" ++ show now + , "scanComplete:" ++ show (scanComplete status) + , "sanityCheckRunning:" ++ show (sanityCheckRunning status) + , "lastSanityCheck:" ++ maybe "" show (lastSanityCheck status) + ] readDaemonStatusFile :: FilePath -> IO DaemonStatus readDaemonStatusFile file = parse <$> newDaemonStatus <*> readFile file - where - parse status = foldr parseline status . lines - parseline line status - | key == "lastRunning" = parseval readtime $ \v -> - status { lastRunning = Just v } - | key == "scanComplete" = parseval readish $ \v -> - status { scanComplete = v } - | key == "sanityCheckRunning" = parseval readish $ \v -> - status { sanityCheckRunning = v } - | key == "lastSanityCheck" = parseval readtime $ \v -> - status { lastSanityCheck = Just v } - | otherwise = status -- unparsable line - where - (key, value) = separate (== ':') line - parseval parser a = maybe status a (parser value) - readtime s = do - d <- parseTime defaultTimeLocale "%s%Qs" s - Just $ utcTimeToPOSIXSeconds d + where + parse status = foldr parseline status . lines + parseline line status + | key == "lastRunning" = parseval readtime $ \v -> + status { lastRunning = Just v } + | key == "scanComplete" = parseval readish $ \v -> + status { scanComplete = v } + | key == "sanityCheckRunning" = parseval readish $ \v -> + status { sanityCheckRunning = v } + | key == "lastSanityCheck" = parseval readtime $ \v -> + status { lastSanityCheck = Just v } + | otherwise = status -- unparsable line + where + (key, value) = separate (== ':') line + parseval parser a = maybe status a (parser value) + readtime s = do + d <- parseTime defaultTimeLocale "%s%Qs" s + Just $ utcTimeToPOSIXSeconds d {- Checks if a time stamp was made after the daemon was lastRunning. - @@ -164,9 +121,9 @@ readDaemonStatusFile file = parse <$> newDaemonStatus <*> readFile file -} afterLastDaemonRun :: EpochTime -> DaemonStatus -> Bool afterLastDaemonRun timestamp status = maybe False (< t) (lastRunning status) - where - t = realToFrac (timestamp + slop) :: POSIXTime - slop = fromIntegral tenMinutes + where + t = realToFrac (timestamp + slop) :: POSIXTime + slop = fromIntegral tenMinutes tenMinutes :: Int tenMinutes = 10 * 60 @@ -181,91 +138,100 @@ adjustTransfersSTM dstatus a = do putTMVar dstatus $ s { currentTransfers = a (currentTransfers s) } {- Alters a transfer's info, if the transfer is in the map. 
-} -alterTransferInfo :: DaemonStatusHandle -> Transfer -> (TransferInfo -> TransferInfo) -> IO () -alterTransferInfo dstatus t a = updateTransferInfo' dstatus $ M.adjust a t +alterTransferInfo :: Transfer -> (TransferInfo -> TransferInfo) -> Assistant () +alterTransferInfo t a = updateTransferInfo' $ M.adjust a t {- Updates a transfer's info. Adds the transfer to the map if necessary, - or if already present, updates it while preserving the old transferTid, - transferPaused, and bytesComplete values, which are not written to disk. -} -updateTransferInfo :: DaemonStatusHandle -> Transfer -> TransferInfo -> IO () -updateTransferInfo dstatus t info = updateTransferInfo' dstatus $ - M.insertWith' merge t info - where - merge new old = new - { transferTid = maybe (transferTid new) Just (transferTid old) - , transferPaused = transferPaused new || transferPaused old - , bytesComplete = maybe (bytesComplete new) Just (bytesComplete old) - } +updateTransferInfo :: Transfer -> TransferInfo -> Assistant () +updateTransferInfo t info = updateTransferInfo' $ M.insertWith' merge t info + where + merge new old = new + { transferTid = maybe (transferTid new) Just (transferTid old) + , transferPaused = transferPaused new || transferPaused old + , bytesComplete = maybe (bytesComplete new) Just (bytesComplete old) + } -updateTransferInfo' :: DaemonStatusHandle -> (TransferMap -> TransferMap) -> IO () -updateTransferInfo' dstatus a = - notifyTransfer dstatus `after` modifyDaemonStatus_ dstatus go - where - go s = s { currentTransfers = a (currentTransfers s) } +updateTransferInfo' :: (TransferMap -> TransferMap) -> Assistant () +updateTransferInfo' a = notifyTransfer `after` modifyDaemonStatus_ update + where + update s = s { currentTransfers = a (currentTransfers s) } {- Removes a transfer from the map, and returns its info. -} -removeTransfer :: DaemonStatusHandle -> Transfer -> IO (Maybe TransferInfo) -removeTransfer dstatus t = - notifyTransfer dstatus `after` modifyDaemonStatus dstatus go - where - go s = - let (info, ts) = M.updateLookupWithKey - (\_k _v -> Nothing) - t (currentTransfers s) - in (s { currentTransfers = ts }, info) +removeTransfer :: Transfer -> Assistant (Maybe TransferInfo) +removeTransfer t = notifyTransfer `after` modifyDaemonStatus remove + where + remove s = + let (info, ts) = M.updateLookupWithKey + (\_k _v -> Nothing) + t (currentTransfers s) + in (s { currentTransfers = ts }, info) {- Send a notification when a transfer is changed. -} -notifyTransfer :: DaemonStatusHandle -> IO () -notifyTransfer dstatus = sendNotification - =<< transferNotifier <$> atomically (readTMVar dstatus) +notifyTransfer :: Assistant () +notifyTransfer = do + dstatus <- getAssistant daemonStatusHandle + liftIO $ sendNotification + =<< transferNotifier <$> atomically (readTMVar dstatus) {- Send a notification when alerts are changed. -} -notifyAlert :: DaemonStatusHandle -> IO () -notifyAlert dstatus = sendNotification - =<< alertNotifier <$> atomically (readTMVar dstatus) +notifyAlert :: Assistant () +notifyAlert = do + dstatus <- getAssistant daemonStatusHandle + liftIO $ sendNotification + =<< alertNotifier <$> atomically (readTMVar dstatus) {- Returns the alert's identifier, which can be used to remove it. 
-} -addAlert :: DaemonStatusHandle -> Alert -> IO AlertId -addAlert dstatus alert = notifyAlert dstatus `after` modifyDaemonStatus dstatus go - where - go s = (s { lastAlertId = i, alertMap = m }, i) - where - i = nextAlertId $ lastAlertId s - m = mergeAlert i alert (alertMap s) +addAlert :: Alert -> Assistant AlertId +addAlert alert = notifyAlert `after` modifyDaemonStatus add + where + add s = (s { lastAlertId = i, alertMap = m }, i) + where + i = nextAlertId $ lastAlertId s + m = mergeAlert i alert (alertMap s) -removeAlert :: DaemonStatusHandle -> AlertId -> IO () -removeAlert dstatus i = updateAlert dstatus i (const Nothing) +removeAlert :: AlertId -> Assistant () +removeAlert i = updateAlert i (const Nothing) -updateAlert :: DaemonStatusHandle -> AlertId -> (Alert -> Maybe Alert) -> IO () -updateAlert dstatus i a = updateAlertMap dstatus $ \m -> M.update a i m +updateAlert :: AlertId -> (Alert -> Maybe Alert) -> Assistant () +updateAlert i a = updateAlertMap $ \m -> M.update a i m -updateAlertMap :: DaemonStatusHandle -> (AlertMap -> AlertMap) -> IO () -updateAlertMap dstatus a = notifyAlert dstatus `after` modifyDaemonStatus_ dstatus go - where - go s = s { alertMap = a (alertMap s) } +updateAlertMap :: (AlertMap -> AlertMap) -> Assistant () +updateAlertMap a = notifyAlert `after` modifyDaemonStatus_ update + where + update s = s { alertMap = a (alertMap s) } {- Displays an alert while performing an activity that returns True on - success. - - The alert is left visible afterwards, as filler. - Old filler is pruned, to prevent the map growing too large. -} -alertWhile :: DaemonStatusHandle -> Alert -> IO Bool -> IO Bool -alertWhile dstatus alert a = alertWhile' dstatus alert $ do +alertWhile :: Alert -> Assistant Bool -> Assistant Bool +alertWhile alert a = alertWhile' alert $ do r <- a return (r, r) {- Like alertWhile, but allows the activity to return a value too. -} -alertWhile' :: DaemonStatusHandle -> Alert -> IO (Bool, a) -> IO a -alertWhile' dstatus alert a = do +alertWhile' :: Alert -> Assistant (Bool, a) -> Assistant a +alertWhile' alert a = do let alert' = alert { alertClass = Activity } - i <- addAlert dstatus alert' + i <- addAlert alert' (ok, r) <- a - updateAlertMap dstatus $ mergeAlert i $ makeAlertFiller ok alert' + updateAlertMap $ mergeAlert i $ makeAlertFiller ok alert' return r {- Displays an alert while performing an activity, then removes it. -} -alertDuring :: DaemonStatusHandle -> Alert -> IO a -> IO a -alertDuring dstatus alert a = do - let alert' = alert { alertClass = Activity } - i <- addAlert dstatus alert' - removeAlert dstatus i `after` a +alertDuring :: Alert -> Assistant a -> Assistant a +alertDuring alert a = do + i <- addAlert $ alert { alertClass = Activity } + removeAlert i `after` a + +{- Remotes using the XMPP transport have urls like xmpp::user@host -} +isXMPPRemote :: Remote -> Bool +isXMPPRemote remote = Git.repoIsUrl r && "xmpp::" `isPrefixOf` Git.repoLocation r + where + r = Remote.repo remote + +getXMPPClientID :: Remote -> ClientID +getXMPPClientID r = T.pack $ drop (length "xmpp::") (Git.repoLocation (Remote.repo r)) diff --git a/Assistant/Drop.hs b/Assistant/Drop.hs new file mode 100644 index 0000000..66e738a --- /dev/null +++ b/Assistant/Drop.hs @@ -0,0 +1,65 @@ +{- git-annex assistant dropping of unwanted content + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. 
+ -} + +module Assistant.Drop where + +import Assistant.Common +import Assistant.DaemonStatus +import Logs.Location +import Logs.Trust +import Types.Remote (AssociatedFile) +import qualified Remote +import qualified Command.Drop +import Command +import Annex.Wanted +import Config + +{- Drop from local and/or remote when allowed by the preferred content and + - numcopies settings. -} +handleDrops :: Bool -> Key -> AssociatedFile -> Assistant () +handleDrops _ _ Nothing = noop +handleDrops fromhere key f = do + syncrs <- syncDataRemotes <$> getDaemonStatus + liftAnnex $ do + locs <- loggedLocations key + handleDrops' locs syncrs fromhere key f + +handleDrops' :: [UUID] -> [Remote] -> Bool -> Key -> AssociatedFile -> Annex () +handleDrops' _ _ _ _ Nothing = noop +handleDrops' locs rs fromhere key (Just f) + | fromhere = do + n <- getcopies + if checkcopies n + then go rs =<< dropl n + else go rs n + | otherwise = go rs =<< getcopies + where + getcopies = do + have <- length <$> trustExclude UnTrusted locs + numcopies <- getNumCopies =<< numCopies f + return (have, numcopies) + checkcopies (have, numcopies) = have > numcopies + decrcopies (have, numcopies) = (have - 1, numcopies) + + go [] _ = noop + go (r:rest) n + | checkcopies n = dropr r n >>= go rest + | otherwise = noop + + checkdrop n@(_, numcopies) u a = ifM (wantDrop u (Just f)) + ( ifM (doCommand $ a (Just numcopies)) + ( return $ decrcopies n + , return n + ) + , return n + ) + + dropl n = checkdrop n Nothing $ \numcopies -> + Command.Drop.startLocal f numcopies key + + dropr r n = checkdrop n (Just $ Remote.uuid r) $ \numcopies -> + Command.Drop.startRemote f numcopies key r diff --git a/Assistant/Install.hs b/Assistant/Install.hs index 1bf424c..635c265 100644 --- a/Assistant/Install.hs +++ b/Assistant/Install.hs @@ -36,36 +36,35 @@ standaloneAppBase = getEnv "GIT_ANNEX_APP_BASE" -} ensureInstalled :: IO () ensureInstalled = go =<< standaloneAppBase - where - go Nothing = noop - go (Just base) = do - let program = base ++ "runshell git-annex" - programfile <- programFile - createDirectoryIfMissing True (parentDir programfile) - writeFile programfile program + where + go Nothing = noop + go (Just base) = do + let program = base ++ "runshell git-annex" + programfile <- programFile + createDirectoryIfMissing True (parentDir programfile) + writeFile programfile program #ifdef darwin_HOST_OS - autostartfile <- userAutoStart osxAutoStartLabel + autostartfile <- userAutoStart osxAutoStartLabel #else - autostartfile <- autoStartPath "git-annex" - <$> userConfigDir + autostartfile <- autoStartPath "git-annex" <$> userConfigDir #endif - installAutoStart program autostartfile + installAutoStart program autostartfile - {- This shim is only updated if it doesn't - - already exist with the right content. This - - ensures that there's no race where it would have - - worked, but is unavailable due to being updated. -} - sshdir <- sshDir - let shim = sshdir "git-annex-shell" - let content = unlines - [ "#!/bin/sh" - , "set -e" - , "exec", base "runshell" ++ - " git-annex-shell -c \"$SSH_ORIGINAL_COMMAND\"" - ] - curr <- catchDefaultIO "" $ readFileStrict shim - when (curr /= content) $ do - createDirectoryIfMissing True (parentDir shim) - writeFile shim content - modifyFileMode shim $ addModes [ownerExecuteMode] + {- This shim is only updated if it doesn't + - already exist with the right content. This + - ensures that there's no race where it would have + - worked, but is unavailable due to being updated. 
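
handleDrops' above walks the local repository and then each sync remote, dropping a copy wherever preferred content says it is unwanted, but only while more than numcopies copies remain. The counting at its core can be sketched on its own as follows; planDrops and its predicate argument are invented for the illustration and are not part of the assistant.

  -- Which of the candidate locations may drop their copy of a key,
  -- given how many copies are currently known (have) and the required
  -- numcopies. Each accepted drop decrements the running count, so at
  -- least numcopies copies always survive; this is the same counting
  -- that handleDrops' does with getcopies/checkcopies/decrcopies.
  planDrops :: Int -> Int -> (loc -> Bool) -> [loc] -> [loc]
  planDrops have numcopies unwanted = go have
    where
      go _ [] = []
      go n (l:ls)
          | n > numcopies && unwanted l = l : go (n - 1) ls
          | otherwise = go n ls

For example, planDrops 3 1 (const True) ["here", "r1", "r2"] returns ["here", "r1"]: two copies may be dropped, and the third location keeps the last required copy.
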
-} + sshdir <- sshDir + let shim = sshdir "git-annex-shell" + let content = unlines + [ "#!/bin/sh" + , "set -e" + , "exec", base "runshell" ++ + " git-annex-shell -c \"$SSH_ORIGINAL_COMMAND\"" + ] + curr <- catchDefaultIO "" $ readFileStrict shim + when (curr /= content) $ do + createDirectoryIfMissing True (parentDir shim) + writeFile shim content + modifyFileMode shim $ addModes [ownerExecuteMode] diff --git a/Assistant/MakeRemote.hs b/Assistant/MakeRemote.hs index 8aa7cb2..479ebd3 100644 --- a/Assistant/MakeRemote.hs +++ b/Assistant/MakeRemote.hs @@ -8,9 +8,6 @@ module Assistant.MakeRemote where import Assistant.Common -import Assistant.ThreadedMonad -import Assistant.DaemonStatus -import Assistant.ScanRemotes import Assistant.Ssh import Assistant.Sync import qualified Types.Remote as R @@ -22,33 +19,33 @@ import qualified Git.Command import qualified Command.InitRemote import Logs.UUID import Logs.Remote +import Git.Remote import qualified Data.Text as T import qualified Data.Map as M -import Data.Char {- Sets up and begins syncing with a new ssh or rsync remote. -} -makeSshRemote :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> Bool -> SshData -> IO Remote -makeSshRemote st dstatus scanremotes forcersync sshdata = do - r <- runThreadState st $ +makeSshRemote :: Bool -> SshData -> Assistant Remote +makeSshRemote forcersync sshdata = do + r <- liftAnnex $ addRemote $ maker (sshRepoName sshdata) sshurl - syncNewRemote st dstatus scanremotes r + syncNewRemote r return r - where - rsync = forcersync || rsyncOnly sshdata - maker - | rsync = makeRsyncRemote - | otherwise = makeGitRemote - sshurl = T.unpack $ T.concat $ - if rsync - then [u, h, T.pack ":", sshDirectory sshdata, T.pack "/"] - else [T.pack "ssh://", u, h, d, T.pack "/"] - where - u = maybe (T.pack "") (\v -> T.concat [v, T.pack "@"]) $ sshUserName sshdata - h = sshHostName sshdata - d - | T.pack "/" `T.isPrefixOf` sshDirectory sshdata = d - | otherwise = T.concat [T.pack "/~/", sshDirectory sshdata] + where + rsync = forcersync || rsyncOnly sshdata + maker + | rsync = makeRsyncRemote + | otherwise = makeGitRemote + sshurl = T.unpack $ T.concat $ + if rsync + then [u, h, T.pack ":", sshDirectory sshdata, T.pack "/"] + else [T.pack "ssh://", u, h, d, T.pack "/"] + where + u = maybe (T.pack "") (\v -> T.concat [v, T.pack "@"]) $ sshUserName sshdata + h = sshHostName sshdata + d + | T.pack "/" `T.isPrefixOf` sshDirectory sshdata = sshDirectory sshdata + | otherwise = T.concat [T.pack "/~/", sshDirectory sshdata] {- Runs an action that returns a name of the remote, and finishes adding it. -} addRemote :: Annex String -> Annex Remote @@ -61,12 +58,12 @@ addRemote a = do makeRsyncRemote :: String -> String -> Annex String makeRsyncRemote name location = makeRemote name location $ const $ makeSpecialRemote name Rsync.remote config - where - config = M.fromList - [ ("encryption", "shared") - , ("rsyncurl", location) - , ("type", "rsync") - ] + where + config = M.fromList + [ ("encryption", "shared") + , ("rsyncurl", location) + , ("type", "rsync") + ] {- Inits a special remote. -} makeSpecialRemote :: String -> RemoteType -> R.RemoteConfig -> Annex () @@ -98,8 +95,8 @@ makeRemote basename location a = do a name return name else return basename - where - samelocation x = Git.repoLocation x == location + where + samelocation x = Git.repoLocation x == location {- Generate an unused name for a remote, adding a number if - necessary. 
@@ -109,12 +106,10 @@ uniqueRemoteName :: String -> Int -> Git.Repo -> String uniqueRemoteName basename n r | null namecollision = name | otherwise = uniqueRemoteName legalbasename (succ n) r - where - namecollision = filter samename (Git.remotes r) - samename x = Git.remoteName x == Just name - name - | n == 0 = legalbasename - | otherwise = legalbasename ++ show n - legalbasename = filter legal basename - legal '_' = True - legal c = isAlphaNum c + where + namecollision = filter samename (Git.remotes r) + samename x = Git.remoteName x == Just name + name + | n == 0 = legalbasename + | otherwise = legalbasename ++ show n + legalbasename = makeLegalName basename diff --git a/Assistant/Monad.hs b/Assistant/Monad.hs new file mode 100644 index 0000000..a676bc8 --- /dev/null +++ b/Assistant/Monad.hs @@ -0,0 +1,120 @@ +{- git-annex assistant monad + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +{-# LANGUAGE PackageImports, GeneralizedNewtypeDeriving, MultiParamTypeClasses #-} + +module Assistant.Monad ( + Assistant, + AssistantData(..), + newAssistantData, + runAssistant, + getAssistant, + liftAnnex, + (<~>), + (<<~), + asIO, + asIO1, + asIO2, +) where + +import "mtl" Control.Monad.Reader +import Control.Monad.Base (liftBase, MonadBase) + +import Common.Annex +import Assistant.Types.ThreadedMonad +import Assistant.Types.DaemonStatus +import Assistant.Types.ScanRemotes +import Assistant.Types.TransferQueue +import Assistant.Types.TransferSlots +import Assistant.Types.Pushes +import Assistant.Types.BranchChange +import Assistant.Types.Commits +import Assistant.Types.Changes +import Assistant.Types.Buddies +import Assistant.Types.NetMessager + +newtype Assistant a = Assistant { mkAssistant :: ReaderT AssistantData IO a } + deriving ( + Monad, + MonadIO, + MonadReader AssistantData, + Functor, + Applicative + ) + +instance MonadBase IO Assistant where + liftBase = Assistant . liftBase + +data AssistantData = AssistantData + { threadName :: String + , threadState :: ThreadState + , daemonStatusHandle :: DaemonStatusHandle + , scanRemoteMap :: ScanRemoteMap + , transferQueue :: TransferQueue + , transferSlots :: TransferSlots + , failedPushMap :: FailedPushMap + , commitChan :: CommitChan + , changeChan :: ChangeChan + , branchChangeHandle :: BranchChangeHandle + , buddyList :: BuddyList + , netMessager :: NetMessager + } + +newAssistantData :: ThreadState -> DaemonStatusHandle -> IO AssistantData +newAssistantData st dstatus = AssistantData + <$> pure "main" + <*> pure st + <*> pure dstatus + <*> newScanRemoteMap + <*> newTransferQueue + <*> newTransferSlots + <*> newFailedPushMap + <*> newCommitChan + <*> newChangeChan + <*> newBranchChangeHandle + <*> newBuddyList + <*> newNetMessager + +runAssistant :: AssistantData -> Assistant a -> IO a +runAssistant d a = runReaderT (mkAssistant a) d + +getAssistant :: (AssistantData -> a) -> Assistant a +getAssistant = reader + +{- Runs an action in the git-annex monad. Note that the same monad state + - is shared amoung all assistant threads, so only one of these can run at + - a time. Therefore, long-duration actions should be avoided. -} +liftAnnex :: Annex a -> Assistant a +liftAnnex a = do + st <- reader threadState + liftIO $ runThreadState st a + +{- Runs an IO action, passing it an IO action that runs an Assistant action. 
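
The new Assistant monad above is a ReaderT over a record of shared handles, so "run an IO action against one field of the environment" collapses into the (<<~) helper named in the export list. A cut-down model of the same pattern, with a toy Env record whose field names are invented for the example:

  {-# LANGUAGE GeneralizedNewtypeDeriving, MultiParamTypeClasses #-}

  import Control.Monad.Reader
  import Control.Concurrent.STM

  -- A toy environment standing in for AssistantData.
  data Env = Env
      { envName :: String
      , envCounter :: TVar Int
      }

  newtype App a = App (ReaderT Env IO a)
      deriving (Functor, Applicative, Monad, MonadIO, MonadReader Env)

  runApp :: Env -> App a -> IO a
  runApp env (App a) = runReaderT a env

  -- Run an IO action against one field of the environment; the same
  -- shape as the assistant's (<<~).
  (<<~) :: (a -> IO b) -> (Env -> a) -> App b
  io <<~ field = asks field >>= liftIO . io

  bumpCounter :: App Int
  bumpCounter = (atomically . bump) <<~ envCounter
    where
      bump v = do
          n <- (+ 1) <$> readTVar v
          writeTVar v n
          return n

  main :: IO ()
  main = do
      counter <- newTVarIO 0
      n <- runApp (Env "demo" counter) bumpCounter
      print n

Because every thread shares the same environment, handles such as the daemon status or the transfer queue no longer have to be threaded through each function's arguments, which is what shrinks the thread list in Assistant.hs above.
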
-} +(<~>) :: (IO a -> IO b) -> Assistant a -> Assistant b +io <~> a = do + d <- reader id + liftIO $ io $ runAssistant d a + +{- Creates an IO action that will run an Assistant action when run. -} +asIO :: Assistant a -> Assistant (IO a) +asIO a = do + d <- reader id + return $ runAssistant d a + +asIO1 :: (a -> Assistant b) -> Assistant (a -> IO b) +asIO1 a = do + d <- reader id + return $ \v -> runAssistant d $ a v + +asIO2 :: (a -> b -> Assistant c) -> Assistant (a -> b -> IO c) +asIO2 a = do + d <- reader id + return $ \v1 v2 -> runAssistant d (a v1 v2) + +{- Runs an IO action on a selected field of the AssistantData. -} +(<<~) :: (a -> IO b) -> (AssistantData -> a) -> Assistant b +io <<~ v = reader v >>= liftIO . io diff --git a/Assistant/NamedThread.hs b/Assistant/NamedThread.hs new file mode 100644 index 0000000..083252f --- /dev/null +++ b/Assistant/NamedThread.hs @@ -0,0 +1,30 @@ +{- git-annex assistant named threads. + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.NamedThread where + +import Assistant.Common +import Assistant.DaemonStatus +import Assistant.Alert + +import qualified Control.Exception as E + +runNamedThread :: NamedThread -> Assistant () +runNamedThread (NamedThread name a) = do + d <- getAssistant id + liftIO . go $ d { threadName = name } + where + go d = do + r <- E.try (runAssistant d a) :: IO (Either E.SomeException ()) + case r of + Right _ -> noop + Left e -> do + let msg = unwords [name, "crashed:", show e] + hPutStrLn stderr msg + -- TODO click to restart + runAssistant d $ void $ + addAlert $ warningAlert name msg diff --git a/Assistant/NetMessager.hs b/Assistant/NetMessager.hs new file mode 100644 index 0000000..d9450ad --- /dev/null +++ b/Assistant/NetMessager.hs @@ -0,0 +1,97 @@ +{- git-annex assistant out of band network messager interface + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.NetMessager where + +import Assistant.Common +import Assistant.Types.NetMessager +import qualified Types.Remote as Remote +import qualified Git + +import Control.Concurrent +import Control.Concurrent.STM +import Control.Concurrent.MSampleVar +import Control.Exception as E +import qualified Data.Set as S +import qualified Data.Text as T + +sendNetMessage :: NetMessage -> Assistant () +sendNetMessage m = + (atomically . flip writeTChan m) <<~ (netMessages . netMessager) + +waitNetMessage :: Assistant (NetMessage) +waitNetMessage = (atomically . readTChan) <<~ (netMessages . netMessager) + +notifyNetMessagerRestart :: Assistant () +notifyNetMessagerRestart = + flip writeSV () <<~ (netMessagerRestart . netMessager) + +waitNetMessagerRestart :: Assistant () +waitNetMessagerRestart = readSV <<~ (netMessagerRestart . netMessager) + +{- Runs an action that runs either the send or receive side of a push. + - + - While the push is running, netMessagesPush will get messages put into it + - relating to this push, while any messages relating to other pushes + - on the same side go to netMessagesDeferred. Once the push finishes, + - those deferred messages will be fed to handledeferred for processing. 
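
runNamedThread above wraps a thread body in E.try so that a crash produces a stderr message and a warning alert rather than a silently dead thread. The same shape, stripped of the assistant plumbing, looks like the sketch below; forkNamed is an invented name.

  import Control.Concurrent (forkIO)
  import qualified Control.Exception as E
  import Control.Monad (void)
  import System.IO (hPutStrLn, stderr)

  -- Fork an action under a name, and report a crash on stderr instead
  -- of letting the exception vanish with the thread.
  forkNamed :: String -> IO () -> IO ()
  forkNamed name a = void $ forkIO $ do
      r <- E.try a :: IO (Either E.SomeException ())
      case r of
          Right () -> return ()
          Left e -> hPutStrLn stderr $ unwords [name, "crashed:", show e]

The original additionally posts a warningAlert, so the crash also shows up in the alert map rather than only on stderr.
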
+ -} +runPush :: PushSide -> ClientID -> (NetMessage -> Assistant ()) -> Assistant a -> Assistant a +runPush side clientid handledeferred a = do + nm <- getAssistant netMessager + let runningv = getSide side $ netMessagerPushRunning nm + let setup = void $ atomically $ swapTMVar runningv $ Just clientid + let cleanup = atomically $ do + void $ swapTMVar runningv Nothing + emptytchan (getSide side $ netMessagesPush nm) + r <- E.bracket_ setup cleanup <~> a + (void . forkIO) <~> processdeferred nm + return r + where + emptytchan c = maybe noop (const $ emptytchan c) =<< tryReadTChan c + processdeferred nm = do + s <- liftIO $ atomically $ swapTMVar (getSide side $ netMessagesPushDeferred nm) S.empty + mapM_ rundeferred (S.toList s) + rundeferred m = (void . (E.try :: (IO () -> IO (Either SomeException ())))) + <~> handledeferred m + +{- While a push is running, matching push messages are put into + - netMessagesPush, while others that involve the same side go to + - netMessagesDeferredPush. + - + - When no push is running involving the same side, returns False. + - + - To avoid bloating memory, only messages that initiate pushes are + - deferred. + -} +queueNetPushMessage :: NetMessage -> Assistant Bool +queueNetPushMessage m@(Pushing clientid stage) = do + nm <- getAssistant netMessager + liftIO $ atomically $ do + v <- readTMVar (getSide side $ netMessagerPushRunning nm) + case v of + Nothing -> return False + (Just runningclientid) + | runningclientid == clientid -> queue nm + | isPushInitiation stage -> defer nm + | otherwise -> discard + where + side = pushDestinationSide stage + queue nm = do + writeTChan (getSide side $ netMessagesPush nm) m + return True + defer nm = do + let mv = getSide side $ netMessagesPushDeferred nm + s <- takeTMVar mv + putTMVar mv $ S.insert m s + return True + discard = return True +queueNetPushMessage _ = return False + +waitNetPushMessage :: PushSide -> Assistant (NetMessage) +waitNetPushMessage side = (atomically . readTChan) + <<~ (getSide side . netMessagesPush . netMessager) diff --git a/Assistant/Pairing/MakeRemote.hs b/Assistant/Pairing/MakeRemote.hs index ab0bef1..38f9981 100644 --- a/Assistant/Pairing/MakeRemote.hs +++ b/Assistant/Pairing/MakeRemote.hs @@ -8,9 +8,6 @@ module Assistant.Pairing.MakeRemote where import Assistant.Common -import Assistant.ThreadedMonad -import Assistant.DaemonStatus -import Assistant.ScanRemotes import Assistant.Ssh import Assistant.Pairing import Assistant.Pairing.Network @@ -21,23 +18,22 @@ import qualified Data.Text as T {- Authorized keys are set up before pairing is complete, so that the other - side can immediately begin syncing. -} -setupAuthorizedKeys :: PairMsg -> IO () -setupAuthorizedKeys msg = do +setupAuthorizedKeys :: PairMsg -> FilePath -> IO () +setupAuthorizedKeys msg repodir = do validateSshPubKey pubkey - unlessM (liftIO $ addAuthorizedKeys False pubkey) $ + unlessM (liftIO $ addAuthorizedKeys False repodir pubkey) $ error "failed setting up ssh authorized keys" - where - pubkey = remoteSshPubKey $ pairMsgData msg + where + pubkey = remoteSshPubKey $ pairMsgData msg -{- When pairing is complete, this is used to set up the remote for the host - - we paired with. -} -finishedPairing :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> PairMsg -> SshKeyPair -> IO () -finishedPairing st dstatus scanremotes msg keypair = do - sshdata <- setupSshKeyPair keypair =<< pairMsgToSshData msg - {- Ensure that we know - - the ssh host key for the host we paired with. 
+{- When local pairing is complete, this is used to set up the remote for + - the host we paired with. -} +finishedLocalPairing :: PairMsg -> SshKeyPair -> Assistant () +finishedLocalPairing msg keypair = do + sshdata <- liftIO $ setupSshKeyPair keypair =<< pairMsgToSshData msg + {- Ensure that we know the ssh host key for the host we paired with. - If we don't, ssh over to get it. -} - unlessM (knownHost $ sshHostName sshdata) $ + liftIO $ unlessM (knownHost $ sshHostName sshdata) $ void $ sshTranscript [ sshOpt "StrictHostKeyChecking" "no" , sshOpt "NumberOfPasswordPrompts" "0" @@ -46,7 +42,7 @@ finishedPairing st dstatus scanremotes msg keypair = do , "git-annex-shell -c configlist " ++ T.unpack (sshDirectory sshdata) ] "" - void $ makeSshRemote st dstatus scanremotes False sshdata + void $ makeSshRemote False sshdata {- Mostly a straightforward conversion. Except: - * Determine the best hostname to use to contact the host. @@ -82,12 +78,12 @@ bestHostName msg = case remoteHostName $ pairMsgData msg of getAddrInfo Nothing (Just localname) Nothing maybe fallback (const $ return localname) (headMaybe addrs) Nothing -> fallback - where - fallback = do - let a = pairMsgAddr msg - let sockaddr = case a of - IPv4Addr addr -> SockAddrInet (PortNum 0) addr - IPv6Addr addr -> SockAddrInet6 (PortNum 0) 0 addr 0 - fromMaybe (showAddr a) - <$> catchDefaultIO Nothing - (fst <$> getNameInfo [] True False sockaddr) + where + fallback = do + let a = pairMsgAddr msg + let sockaddr = case a of + IPv4Addr addr -> SockAddrInet (PortNum 0) addr + IPv6Addr addr -> SockAddrInet6 (PortNum 0) 0 addr 0 + fromMaybe (showAddr a) + <$> catchDefaultIO Nothing + (fst <$> getNameInfo [] True False sockaddr) diff --git a/Assistant/Pairing/Network.hs b/Assistant/Pairing/Network.hs index a6289c0..44a63df 100644 --- a/Assistant/Pairing/Network.hs +++ b/Assistant/Pairing/Network.hs @@ -31,11 +31,12 @@ import Control.Concurrent pairingPort :: PortNumber pairingPort = 55556 -{- This is the All Hosts multicast group, which should reach all hosts - - on the same network segment. -} +{- Goal: Reach all hosts on the same network segment. + - Method: Use same address that avahi uses. Other broadcast addresses seem + - to not be let through some routers. -} multicastAddress :: SomeAddr -> HostName -multicastAddress (IPv4Addr _) = "224.0.0.1" -multicastAddress (IPv6Addr _) = "ff02::1" +multicastAddress (IPv4Addr _) = "224.0.0.251" +multicastAddress (IPv6Addr _) = "ff02::fb" {- Multicasts a message repeatedly on all interfaces, with a 2 second - delay between each transmission. The message is repeated forever @@ -49,47 +50,50 @@ multicastAddress (IPv6Addr _) = "ff02::1" -} multicastPairMsg :: Maybe Int -> Secret -> PairData -> PairStage -> IO () multicastPairMsg repeats secret pairdata stage = go M.empty repeats - where - go _ (Just 0) = noop - go cache n = do - addrs <- activeNetworkAddresses - let cache' = updatecache cache addrs - mapM_ (sendinterface cache') addrs - threadDelaySeconds (Seconds 2) - go cache' $ pred <$> n - {- The multicast library currently chokes on ipv6 addresses. 
-} - sendinterface _ (IPv6Addr _) = noop - sendinterface cache i = void $ catchMaybeIO $ - withSocketsDo $ bracket setup cleanup use - where - setup = multicastSender (multicastAddress i) pairingPort - cleanup (sock, _) = sClose sock -- FIXME does not work - use (sock, addr) = do - setInterface sock (showAddr i) - maybe noop (\s -> void $ sendTo sock s addr) - (M.lookup i cache) - updatecache cache [] = cache - updatecache cache (i:is) - | M.member i cache = updatecache cache is - | otherwise = updatecache (M.insert i (show $ mkmsg i) cache) is - mkmsg addr = PairMsg $ - mkVerifiable (stage, pairdata, addr) secret + where + go _ (Just 0) = noop + go cache n = do + addrs <- activeNetworkAddresses + let cache' = updatecache cache addrs + mapM_ (sendinterface cache') addrs + threadDelaySeconds (Seconds 2) + go cache' $ pred <$> n + {- The multicast library currently chokes on ipv6 addresses. -} + sendinterface _ (IPv6Addr _) = noop + sendinterface cache i = void $ catchMaybeIO $ + withSocketsDo $ bracket setup cleanup use + where + setup = multicastSender (multicastAddress i) pairingPort + cleanup (sock, _) = sClose sock -- FIXME does not work + use (sock, addr) = do + setInterface sock (showAddr i) + maybe noop (\s -> void $ sendTo sock s addr) + (M.lookup i cache) + updatecache cache [] = cache + updatecache cache (i:is) + | M.member i cache = updatecache cache is + | otherwise = updatecache (M.insert i (show $ mkmsg i) cache) is + mkmsg addr = PairMsg $ + mkVerifiable (stage, pairdata, addr) secret -startSending :: DaemonStatusHandle -> PairingInProgress -> PairStage -> (PairStage -> IO ()) -> IO () -startSending dstatus pip stage sender = void $ forkIO $ do - tid <- myThreadId - let pip' = pip { inProgressPairStage = stage, inProgressThreadId = Just tid } - oldpip <- modifyDaemonStatus dstatus $ - \s -> (s { pairingInProgress = Just pip' }, pairingInProgress s) - maybe noop stopold oldpip - sender stage - where - stopold = maybe noop killThread . inProgressThreadId +startSending :: PairingInProgress -> PairStage -> (PairStage -> IO ()) -> Assistant () +startSending pip stage sender = do + a <- asIO start + void $ liftIO $ forkIO a + where + start = do + tid <- liftIO myThreadId + let pip' = pip { inProgressPairStage = stage, inProgressThreadId = Just tid } + oldpip <- modifyDaemonStatus $ + \s -> (s { pairingInProgress = Just pip' }, pairingInProgress s) + maybe noop stopold oldpip + liftIO $ sender stage + stopold = maybe noop (liftIO . killThread) . inProgressThreadId -stopSending :: DaemonStatusHandle -> PairingInProgress -> IO () -stopSending dstatus pip = do - maybe noop killThread $ inProgressThreadId pip - modifyDaemonStatus_ dstatus $ \s -> s { pairingInProgress = Nothing } +stopSending :: PairingInProgress -> Assistant () +stopSending pip = do + maybe noop (liftIO . killThread) $ inProgressThreadId pip + modifyDaemonStatus_ $ \s -> s { pairingInProgress = Nothing } class ToSomeAddr a where toSomeAddr :: a -> SomeAddr @@ -122,5 +126,5 @@ pairRepo msg = concat , ":" , remoteDirectory d ] - where - d = pairMsgData msg + where + d = pairMsgData msg diff --git a/Assistant/Pushes.hs b/Assistant/Pushes.hs index f411dda..9765b6a 100644 --- a/Assistant/Pushes.hs +++ b/Assistant/Pushes.hs @@ -7,40 +7,34 @@ module Assistant.Pushes where -import Common.Annex +import Assistant.Common +import Assistant.Types.Pushes import Control.Concurrent.STM import Data.Time.Clock import qualified Data.Map as M -{- Track the most recent push failure for each remote. 
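{- A small sketch of the pattern used by startSending/stopSending above:
 - remember the ThreadId of the current sender in shared state, and kill any
 - previous sender before a new one takes over. The TVar here is a simplified
 - stand-in for the pairingInProgress field of the daemon status. -}

import Control.Concurrent
import Control.Concurrent.STM
import Control.Monad (void)

newWorkerSlot :: IO (TVar (Maybe ThreadId))
newWorkerSlot = newTVarIO Nothing

-- Starts a new worker, recording its ThreadId and stopping any older worker.
startWorker :: TVar (Maybe ThreadId) -> IO () -> IO ()
startWorker slot work = void $ forkIO $ do
    tid <- myThreadId
    old <- atomically $ swapTVar slot (Just tid)
    maybe (return ()) killThread old
    work

stopWorker :: TVar (Maybe ThreadId) -> IO ()
stopWorker slot = do
    old <- atomically $ swapTVar slot Nothing
    maybe (return ()) killThread old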
-} -type PushMap = M.Map Remote UTCTime -type FailedPushMap = TMVar PushMap - -{- The TMVar starts empty, and is left empty when there are no - - failed pushes. This way we can block until there are some failed pushes. - -} -newFailedPushMap :: IO FailedPushMap -newFailedPushMap = atomically newEmptyTMVar - {- Blocks until there are failed pushes. - Returns Remotes whose pushes failed a given time duration or more ago. - (This may be an empty list.) -} -getFailedPushesBefore :: FailedPushMap -> NominalDiffTime -> IO [Remote] -getFailedPushesBefore v duration = do - m <- atomically $ readTMVar v - now <- getCurrentTime - return $ M.keys $ M.filter (not . toorecent now) m - where - toorecent now time = now `diffUTCTime` time < duration +getFailedPushesBefore :: NominalDiffTime -> Assistant [Remote] +getFailedPushesBefore duration = do + v <- getAssistant failedPushMap + liftIO $ do + m <- atomically $ readTMVar v + now <- getCurrentTime + return $ M.keys $ M.filter (not . toorecent now) m + where + toorecent now time = now `diffUTCTime` time < duration {- Modifies the map. -} -changeFailedPushMap :: FailedPushMap -> (PushMap -> PushMap) -> IO () -changeFailedPushMap v a = atomically $ - store . a . fromMaybe M.empty =<< tryTakeTMVar v - where - {- tryTakeTMVar empties the TMVar; refill it only if - - the modified map is not itself empty -} - store m - | m == M.empty = noop - | otherwise = putTMVar v $! m +changeFailedPushMap :: (PushMap -> PushMap) -> Assistant () +changeFailedPushMap a = do + v <- getAssistant failedPushMap + liftIO $ atomically $ store v . a . fromMaybe M.empty =<< tryTakeTMVar v + where + {- tryTakeTMVar empties the TMVar; refill it only if + - the modified map is not itself empty -} + store v m + | m == M.empty = noop + | otherwise = putTMVar v $! m diff --git a/Assistant/ScanRemotes.hs b/Assistant/ScanRemotes.hs index 661c980..2743c0f 100644 --- a/Assistant/ScanRemotes.hs +++ b/Assistant/ScanRemotes.hs @@ -7,42 +7,35 @@ module Assistant.ScanRemotes where -import Common.Annex +import Assistant.Common +import Assistant.Types.ScanRemotes import qualified Types.Remote as Remote import Data.Function import Control.Concurrent.STM import qualified Data.Map as M -data ScanInfo = ScanInfo - { scanPriority :: Int - , fullScan :: Bool - } - -type ScanRemoteMap = TMVar (M.Map Remote ScanInfo) - -{- The TMVar starts empty, and is left empty when there are no remotes - - to scan. -} -newScanRemoteMap :: IO ScanRemoteMap -newScanRemoteMap = atomically newEmptyTMVar - {- Blocks until there is a remote or remotes that need to be scanned. - - The list has higher priority remotes listed first. -} -getScanRemote :: ScanRemoteMap -> IO [(Remote, ScanInfo)] -getScanRemote v = atomically $ - reverse . sortBy (compare `on` scanPriority . snd) . M.toList - <$> takeTMVar v +getScanRemote :: Assistant [(Remote, ScanInfo)] +getScanRemote = do + v <- getAssistant scanRemoteMap + liftIO $ atomically $ + reverse . sortBy (compare `on` scanPriority . snd) . M.toList + <$> takeTMVar v {- Adds new remotes that need scanning. 
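{- A minimal sketch of the empty-TMVar convention used for the failed-push
 - map above (the scan-remote map below relies on the same trick): the TMVar
 - is left empty whenever the map is empty, so readers block until there is
 - something to hand out. Remote is replaced by a plain String key here. -}

import Control.Concurrent.STM
import Data.Time.Clock
import qualified Data.Map as M

type FailedMap = TMVar (M.Map String UTCTime)

-- Blocks until some pushes have failed, then returns those that failed
-- at least the given duration ago (possibly an empty list).
failedBefore :: FailedMap -> NominalDiffTime -> IO [String]
failedBefore v duration = do
    m <- atomically $ readTMVar v
    now <- getCurrentTime
    return $ M.keys $ M.filter (\t -> now `diffUTCTime` t >= duration) m

-- Applies a function to the map, leaving the TMVar empty if the result is
-- empty so that readers keep blocking.
changeFailed :: FailedMap -> (M.Map String UTCTime -> M.Map String UTCTime) -> IO ()
changeFailed v f = atomically $ do
    m <- f . maybe M.empty id <$> tryTakeTMVar v
    if M.null m
        then return ()
        else putTMVar v $! m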
-} -addScanRemotes :: ScanRemoteMap -> Bool -> [Remote] -> IO () -addScanRemotes _ _ [] = noop -addScanRemotes v full rs = atomically $ do - m <- fromMaybe M.empty <$> tryTakeTMVar v - putTMVar v $ M.unionWith merge (M.fromList $ zip rs (map info rs)) m - where - info r = ScanInfo (-1 * Remote.cost r) full - merge x y = ScanInfo - { scanPriority = max (scanPriority x) (scanPriority y) - , fullScan = fullScan x || fullScan y - } +addScanRemotes :: Bool -> [Remote] -> Assistant () +addScanRemotes _ [] = noop +addScanRemotes full rs = do + v <- getAssistant scanRemoteMap + liftIO $ atomically $ do + m <- fromMaybe M.empty <$> tryTakeTMVar v + putTMVar v $ M.unionWith merge (M.fromList $ zip rs (map info rs)) m + where + info r = ScanInfo (-1 * Remote.cost r) full + merge x y = ScanInfo + { scanPriority = max (scanPriority x) (scanPriority y) + , fullScan = fullScan x || fullScan y + } diff --git a/Assistant/Ssh.hs b/Assistant/Ssh.hs index 59ed344..01e44f3 100644 --- a/Assistant/Ssh.hs +++ b/Assistant/Ssh.hs @@ -9,6 +9,8 @@ module Assistant.Ssh where import Common.Annex import Utility.TempFile +import Utility.UserInfo +import Git.Remote import Data.Text (Text) import qualified Data.Text as T @@ -50,14 +52,11 @@ sshDir = do genSshHost :: Text -> Maybe Text -> String genSshHost host user = maybe "" (\v -> T.unpack v ++ "@") user ++ T.unpack host -{- host_dir, with all / in dir replaced by _, and bad characters removed -} +{- Generates a git remote name, like host_dir or host -} genSshRepoName :: String -> FilePath -> String genSshRepoName host dir - | null dir = filter legal host - | otherwise = filter legal $ host ++ "_" ++ replace "/" "_" dir - where - legal '_' = True - legal c = isAlphaNum c + | null dir = makeLegalName host + | otherwise = makeLegalName $ host ++ "_" ++ dir {- The output of ssh, including both stdout and stderr. -} sshTranscript :: [String] -> String -> IO (String, Bool) @@ -89,30 +88,40 @@ sshTranscript opts input = do hClose readh ok <- checkSuccessProcess pid - return () return (transcript, ok) {- Ensure that the ssh public key doesn't include any ssh options, like - command=foo, or other weirdness -} validateSshPubKey :: SshPubKey -> IO () -validateSshPubKey pubkey = do - let ws = words pubkey - when (length ws > 3 || length ws < 2) $ - error $ "wrong number of words in ssh public key " ++ pubkey - let (ssh, keytype) = separate (== '-') (ws !! 0) - unless (ssh == "ssh" && all isAlphaNum keytype) $ - error $ "bad ssh public key prefix " ++ ws !! 0 - when (length ws == 3) $ - unless (all (\c -> isAlphaNum c || c == '@') (ws !! 
2)) $ - error $ "bad comment in ssh public key " ++ pubkey - -addAuthorizedKeys :: Bool -> SshPubKey -> IO Bool -addAuthorizedKeys rsynconly pubkey = boolSystem "sh" - [ Param "-c" , Param $ addAuthorizedKeysCommand rsynconly pubkey ] - -removeAuthorizedKeys :: Bool -> SshPubKey -> IO () -removeAuthorizedKeys rsynconly pubkey = do - let keyline = authorizedKeysLine rsynconly pubkey +validateSshPubKey pubkey = either error return $ check $ words pubkey + where + check [prefix, _key, comment] = do + checkprefix prefix + checkcomment comment + check [prefix, _key] = + checkprefix prefix + check _ = err "wrong number of words in ssh public key" + + ok = Right () + err msg = Left $ unwords [msg, pubkey] + + checkprefix prefix + | ssh == "ssh" && all isAlphaNum keytype = ok + | otherwise = err "bad ssh public key prefix" + where + (ssh, keytype) = separate (== '-') prefix + + checkcomment comment + | all (\c -> isAlphaNum c || c == '@' || c == '-' || c == '_') comment = ok + | otherwise = err "bad comment in ssh public key" + +addAuthorizedKeys :: Bool -> FilePath -> SshPubKey -> IO Bool +addAuthorizedKeys rsynconly dir pubkey = boolSystem "sh" + [ Param "-c" , Param $ addAuthorizedKeysCommand rsynconly dir pubkey ] + +removeAuthorizedKeys :: Bool -> FilePath -> SshPubKey -> IO () +removeAuthorizedKeys rsynconly dir pubkey = do + let keyline = authorizedKeysLine rsynconly dir pubkey sshdir <- sshDir let keyfile = sshdir ".authorized_keys" ls <- lines <$> readFileStrict keyfile @@ -124,8 +133,8 @@ removeAuthorizedKeys rsynconly pubkey = do - The ~/.ssh/git-annex-shell wrapper script is created if not already - present. -} -addAuthorizedKeysCommand :: Bool -> SshPubKey -> String -addAuthorizedKeysCommand rsynconly pubkey = join "&&" +addAuthorizedKeysCommand :: Bool -> FilePath -> SshPubKey -> String +addAuthorizedKeysCommand rsynconly dir pubkey = join "&&" [ "mkdir -p ~/.ssh" , join "; " [ "if [ ! -e " ++ wrapper ++ " ]" @@ -137,27 +146,27 @@ addAuthorizedKeysCommand rsynconly pubkey = join "&&" , "chmod 600 ~/.ssh/authorized_keys" , unwords [ "echo" - , shellEscape $ authorizedKeysLine rsynconly pubkey + , shellEscape $ authorizedKeysLine rsynconly dir pubkey , ">>~/.ssh/authorized_keys" ] ] - where - echoval v = "echo " ++ shellEscape v - wrapper = "~/.ssh/git-annex-shell" - script = - [ "#!/bin/sh" - , "set -e" - , "exec git-annex-shell -c \"$SSH_ORIGINAL_COMMAND\"" - ] + where + echoval v = "echo " ++ shellEscape v + wrapper = "~/.ssh/git-annex-shell" + script = + [ "#!/bin/sh" + , "set -e" + , "exec git-annex-shell -c \"$SSH_ORIGINAL_COMMAND\"" + ] -authorizedKeysLine :: Bool -> SshPubKey -> String -authorizedKeysLine rsynconly pubkey +authorizedKeysLine :: Bool -> FilePath -> SshPubKey -> String +authorizedKeysLine rsynconly dir pubkey {- TODO: Locking down rsync is difficult, requiring a rather - long perl script. -} | rsynconly = pubkey | otherwise = limitcommand ++ pubkey - where - limitcommand = "command=\"~/.ssh/git-annex-shell\",no-agent-forwarding,no-port-forwarding,no-X11-forwarding " + where + limitcommand = "command=\"GIT_ANNEX_SHELL_DIRECTORY="++shellEscape dir++" ~/.ssh/git-annex-shell\",no-agent-forwarding,no-port-forwarding,no-X11-forwarding " {- Generates a ssh key pair. -} genSshKeyPair :: IO SshKeyPair @@ -201,12 +210,12 @@ setupSshKeyPair sshkeypair sshdata = do ] return $ sshdata { sshHostName = T.pack mangledhost } - where - sshprivkeyfile = "key." 
++ mangledhost - sshpubkeyfile = sshprivkeyfile ++ ".pub" - mangledhost = mangleSshHostName - (T.unpack $ sshHostName sshdata) - (T.unpack <$> sshUserName sshdata) + where + sshprivkeyfile = "key." ++ mangledhost + sshpubkeyfile = sshprivkeyfile ++ ".pub" + mangledhost = mangleSshHostName + (T.unpack $ sshHostName sshdata) + (T.unpack <$> sshUserName sshdata) mangleSshHostName :: String -> Maybe String -> String mangleSshHostName host user = "git-annex-" ++ host ++ (maybe "-" ('-':) user) @@ -215,8 +224,8 @@ unMangleSshHostName :: String -> String unMangleSshHostName h | "git-annex-" `isPrefixOf` h = join "-" (beginning $ drop 2 dashbits) | otherwise = h - where - dashbits = split "-" h + where + dashbits = split "-" h {- Does ssh have known_hosts data for a hostname? -} knownHost :: Text -> IO Bool @@ -226,7 +235,7 @@ knownHost hostname = do ( not . null <$> checkhost , return False ) - where - {- ssh-keygen -F can crash on some old known_hosts file -} - checkhost = catchDefaultIO "" $ - readProcess "ssh-keygen" ["-F", T.unpack hostname] + where + {- ssh-keygen -F can crash on some old known_hosts file -} + checkhost = catchDefaultIO "" $ + readProcess "ssh-keygen" ["-F", T.unpack hostname] diff --git a/Assistant/Sync.hs b/Assistant/Sync.hs index 6c167e2..ae2b5ea 100644 --- a/Assistant/Sync.hs +++ b/Assistant/Sync.hs @@ -9,8 +9,9 @@ module Assistant.Sync where import Assistant.Common import Assistant.Pushes +import Assistant.NetMessager +import Assistant.Types.NetMessager import Assistant.Alert -import Assistant.ThreadedMonad import Assistant.DaemonStatus import Assistant.ScanRemotes import qualified Command.Sync @@ -36,32 +37,38 @@ import Control.Concurrent - the remotes have diverged from the local git-annex branch. Otherwise, - it's sufficient to requeue failed transfers. -} -reconnectRemotes :: ThreadName -> ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> [Remote] -> IO () -reconnectRemotes _ _ _ _ [] = noop -reconnectRemotes threadname st dstatus scanremotes rs = void $ - alertWhile dstatus (syncAlert rs) $ do +reconnectRemotes :: Bool -> [Remote] -> Assistant () +reconnectRemotes _ [] = noop +reconnectRemotes notifypushes rs = void $ do + alertWhile (syncAlert rs) $ do (ok, diverged) <- sync - =<< runThreadState st (inRepo Git.Branch.current) - addScanRemotes scanremotes diverged rs + =<< liftAnnex (inRepo Git.Branch.current) + addScanRemotes diverged rs return ok - where - (gitremotes, _specialremotes) = - partition (Git.repoIsUrl . Remote.repo) rs - sync (Just branch) = do - diverged <- manualPull st (Just branch) gitremotes - now <- getCurrentTime - ok <- pushToRemotes threadname now st Nothing gitremotes - return (ok, diverged) - {- No local branch exists yet, but we can try pulling. -} - sync Nothing = do - diverged <- manualPull st Nothing gitremotes - return (True, diverged) + where + gitremotes = filter (notspecialremote . Remote.repo) rs + notspecialremote r + | Git.repoIsUrl r = True + | Git.repoIsLocal r = True + | otherwise = False + sync (Just branch) = do + diverged <- snd <$> manualPull (Just branch) gitremotes + now <- liftIO getCurrentTime + ok <- pushToRemotes now notifypushes gitremotes + return (ok, diverged) + {- No local branch exists yet, but we can try pulling. -} + sync Nothing = do + diverged <- snd <$> manualPull Nothing gitremotes + return (True, diverged) {- Updates the local sync branch, then pushes it to all remotes, in - parallel, along with the git-annex branch. 
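{- A simplified standalone rendering of the structural check performed by
 - validateSshPubKey above, with hypothetical example inputs. It only mirrors
 - the word-count, "ssh-<keytype>" prefix, and comment-character checks; the
 - key material itself is not validated. -}

import Data.Char (isAlphaNum)

checkPubKey :: String -> Either String ()
checkPubKey pubkey = case words pubkey of
    [prefix, _key, comment] -> checkPrefix prefix >> checkComment comment
    [prefix, _key] -> checkPrefix prefix
    _ -> Left "wrong number of words in ssh public key"
  where
    checkPrefix prefix = case break (== '-') prefix of
        ("ssh", '-':keytype) | all isAlphaNum keytype -> Right ()
        _ -> Left "bad ssh public key prefix"
    checkComment c
        | all (\ch -> isAlphaNum ch || ch `elem` "@-_") c = Right ()
        | otherwise = Left "bad comment in ssh public key"

-- For example (hypothetical keys):
--   checkPubKey "ssh-rsa AAAAB3Nza user@host"        == Right ()
--   checkPubKey "command=\"evil\" ssh-rsa AAAAB3Nza" == Left "bad ssh public key prefix"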
This is the same - as "git annex sync", except in parallel, and will co-exist with use of - "git annex sync". - + - After the pushes to normal git remotes, also signals XMPP clients that + - they can request an XMPP push. + - - Avoids running possibly long-duration commands in the Annex monad, so - as not to block other threads. - @@ -76,85 +83,95 @@ reconnectRemotes threadname st dstatus scanremotes rs = void $ - fallback mode, where our push is guarenteed to succeed if the remote is - reachable. If the fallback fails, the push is queued to be retried - later. - - - - The fallback mode pushes to branches on the remote that have our uuid in - - them. While ugly, those branches are reserved for pushing by us, and - - so our pushes will succeed. -} -pushToRemotes :: ThreadName -> UTCTime -> ThreadState -> Maybe FailedPushMap -> [Remote] -> IO Bool -pushToRemotes threadname now st mpushmap remotes = do - (g, branch, u) <- runThreadState st $ (,,) - <$> gitRepo - <*> inRepo Git.Branch.current - <*> getUUID - go True branch g u remotes - where - go _ Nothing _ _ _ = return True -- no branch, so nothing to do - go shouldretry (Just branch) g u rs = do - debug threadname - [ "pushing to" - , show rs - ] - Command.Sync.updateBranch (Command.Sync.syncBranch branch) g - (succeeded, failed) <- inParallel (push g branch) rs - updatemap succeeded [] - let ok = null failed - if ok - then return ok - else if shouldretry - then retry branch g u failed - else fallback branch g u failed +pushToRemotes :: UTCTime -> Bool -> [Remote] -> Assistant Bool +pushToRemotes now notifypushes remotes = do + (g, branch, u) <- liftAnnex $ do + Annex.Branch.commit "update" + (,,) + <$> gitRepo + <*> inRepo Git.Branch.current + <*> getUUID + let (xmppremotes, normalremotes) = partition isXMPPRemote remotes + ret <- go True branch g u normalremotes + forM_ xmppremotes $ \r -> + sendNetMessage $ Pushing (getXMPPClientID r) CanPush + return ret + where + go _ Nothing _ _ _ = return True -- no branch, so nothing to do + go _ _ _ _ [] = return True -- no remotes, so nothing to do + go shouldretry (Just branch) g u rs = do + debug ["pushing to", show rs] + liftIO $ Command.Sync.updateBranch (Command.Sync.syncBranch branch) g + (succeeded, failed) <- liftIO $ inParallel (push g branch) rs + updatemap succeeded [] + if null failed + then do + when notifypushes $ + sendNetMessage $ NotifyPush $ + map Remote.uuid succeeded + return True + else if shouldretry + then retry branch g u failed + else fallback branch g u failed - updatemap succeeded failed = case mpushmap of - Nothing -> noop - Just pushmap -> changeFailedPushMap pushmap $ \m -> - M.union (makemap failed) $ - M.difference m (makemap succeeded) - makemap l = M.fromList $ zip l (repeat now) + updatemap succeeded failed = changeFailedPushMap $ \m -> + M.union (makemap failed) $ + M.difference m (makemap succeeded) + makemap l = M.fromList $ zip l (repeat now) - retry branch g u rs = do - debug threadname [ "trying manual pull to resolve failed pushes" ] - void $ manualPull st (Just branch) rs - go False (Just branch) g u rs + retry branch g u rs = do + debug ["trying manual pull to resolve failed pushes"] + void $ manualPull (Just branch) rs + go False (Just branch) g u rs - fallback branch g u rs = do - debug threadname - [ "fallback pushing to" - , show rs - ] - (succeeded, failed) <- inParallel (pushfallback g u branch) rs - updatemap succeeded failed - return $ null failed - - push g branch remote = Command.Sync.pushBranch remote branch g - pushfallback g u branch remote 
= Git.Command.runBool "push" - [ Param $ Remote.name remote - , Param $ refspec Annex.Branch.name - , Param $ refspec branch - ] g - where - {- Push to refs/synced/uuid/branch; this - - avoids cluttering up the branch display. -} - refspec b = concat - [ s - , ":" - , "refs/synced/" ++ fromUUID u ++ "/" ++ s - ] - where s = show $ Git.Ref.base b + fallback branch g u rs = do + debug ["fallback pushing to", show rs] + (succeeded, failed) <- liftIO $ + inParallel (\r -> pushFallback u branch r g) rs + updatemap succeeded failed + when (notifypushes && (not $ null succeeded)) $ + sendNetMessage $ NotifyPush $ + map Remote.uuid succeeded + return $ null failed + + push g branch remote = Command.Sync.pushBranch remote branch g + +{- This fallback push mode pushes to branches on the remote that have our + - uuid in them. While ugly, those branches are reserved for pushing by us, + - and so our pushes will never conflict with other pushes. -} +pushFallback :: UUID -> Git.Ref -> Remote -> Git.Repo -> IO Bool +pushFallback u branch remote = Git.Command.runBool "push" params + where + params = + [ Param $ Remote.name remote + , Param $ refspec Annex.Branch.name + , Param $ refspec branch + ] + {- Push to refs/synced/uuid/branch; this + - avoids cluttering up the branch display. -} + refspec b = concat + [ s + , ":" + , "refs/synced/" ++ fromUUID u ++ "/" ++ s + ] + where s = show $ Git.Ref.base b {- Manually pull from remotes and merge their branches. -} -manualPull :: ThreadState -> Maybe Git.Ref -> [Remote] -> IO Bool -manualPull st currentbranch remotes = do - g <- runThreadState st gitRepo - forM_ remotes $ \r -> +manualPull :: Maybe Git.Ref -> [Remote] -> Assistant ([Bool], Bool) +manualPull currentbranch remotes = do + g <- liftAnnex gitRepo + results <- liftIO $ forM remotes $ \r -> Git.Command.runBool "fetch" [Param $ Remote.name r] g - haddiverged <- runThreadState st Annex.Branch.forceUpdate + haddiverged <- liftAnnex Annex.Branch.forceUpdate forM_ remotes $ \r -> - runThreadState st $ Command.Sync.mergeRemote r currentbranch - return haddiverged + liftAnnex $ Command.Sync.mergeRemote r currentbranch + return (results, haddiverged) {- Start syncing a newly added remote, using a background thread. -} -syncNewRemote :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> Remote -> IO () -syncNewRemote st dstatus scanremotes remote = do - runThreadState st $ updateSyncRemotes dstatus - void $ forkIO $ reconnectRemotes "SyncRemote" st dstatus scanremotes [remote] +syncNewRemote :: Remote -> Assistant () +syncNewRemote remote = do + updateSyncRemotes + thread <- asIO $ do + reconnectRemotes False [remote] + void $ liftIO $ forkIO $ thread diff --git a/Assistant/ThreadedMonad.hs b/Assistant/ThreadedMonad.hs deleted file mode 100644 index 7b915e1..0000000 --- a/Assistant/ThreadedMonad.hs +++ /dev/null @@ -1,38 +0,0 @@ -{- making the Annex monad available across threads - - - - Copyright 2012 Joey Hess - - - - Licensed under the GNU GPL version 3 or higher. - -} - -module Assistant.ThreadedMonad where - -import Common.Annex -import qualified Annex - -import Control.Concurrent -import Data.Tuple - -{- The Annex state is stored in a MVar, so that threaded actions can access - - it. -} -type ThreadState = MVar Annex.AnnexState - -{- Stores the Annex state in a MVar. - - - - Once the action is finished, retrieves the state from the MVar. 
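{- A small sketch of the refspec built by pushFallback above: pushing
 - refs/heads/<branch> to refs/synced/<uuid>/refs/heads/<branch> gives each
 - repository a namespace that only it pushes to, so the fallback push cannot
 - be rejected as a non-fast-forward. The uuid below is a hypothetical
 - example value. -}

syncedRefSpec :: String -> String -> String
syncedRefSpec u branch = branch ++ ":refs/synced/" ++ u ++ "/" ++ branch

-- For example:
--   syncedRefSpec "5863d8c0" "refs/heads/master"
--     == "refs/heads/master:refs/synced/5863d8c0/refs/heads/master"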
- -} -withThreadState :: (ThreadState -> Annex a) -> Annex a -withThreadState a = do - state <- Annex.getState id - mvar <- liftIO $ newMVar state - r <- a mvar - newstate <- liftIO $ takeMVar mvar - Annex.changeState (const newstate) - return r - -{- Runs an Annex action, using the state from the MVar. - - - - This serializes calls by threads; only one thread can run in Annex at a - - time. -} -runThreadState :: ThreadState -> Annex a -> IO a -runThreadState mvar a = modifyMVar mvar $ \state -> swap <$> Annex.run state a diff --git a/Assistant/Threads/Committer.hs b/Assistant/Threads/Committer.hs index 6b036d0..445e44d 100644 --- a/Assistant/Threads/Committer.hs +++ b/Assistant/Threads/Committer.hs @@ -11,12 +11,12 @@ module Assistant.Threads.Committer where import Assistant.Common import Assistant.Changes +import Assistant.Types.Changes import Assistant.Commits import Assistant.Alert -import Assistant.ThreadedMonad +import Assistant.DaemonStatus import Assistant.Threads.Watcher import Assistant.TransferQueue -import Assistant.DaemonStatus import Logs.Transfer import qualified Annex.Queue import qualified Git.Command @@ -37,48 +37,39 @@ import Data.Tuple.Utils import qualified Data.Set as S import Data.Either -thisThread :: ThreadName -thisThread = "Committer" - {- This thread makes git commits at appropriate times. -} -commitThread :: ThreadState -> ChangeChan -> CommitChan -> TransferQueue -> DaemonStatusHandle -> NamedThread -commitThread st changechan commitchan transferqueue dstatus = thread $ do - delayadd <- runThreadState st $ +commitThread :: NamedThread +commitThread = NamedThread "Committer" $ do + delayadd <- liftAnnex $ maybe delayaddDefault (Just . Seconds) . readish <$> getConfig (annexConfig "delayadd") "" - runEvery (Seconds 1) $ do + runEvery (Seconds 1) <~> do -- We already waited one second as a simple rate limiter. -- Next, wait until at least one change is available for -- processing. - changes <- getChanges changechan + changes <- getChanges -- Now see if now's a good time to commit. - time <- getCurrentTime + time <- liftIO getCurrentTime if shouldCommit time changes then do - readychanges <- handleAdds delayadd st changechan transferqueue dstatus changes + readychanges <- handleAdds delayadd changes if shouldCommit time readychanges then do - debug thisThread + debug [ "committing" , show (length readychanges) , "changes" ] - void $ alertWhile dstatus commitAlert $ - runThreadState st commitStaged - recordCommit commitchan (Commit time) + void $ alertWhile commitAlert $ + liftAnnex commitStaged + recordCommit else refill readychanges else refill changes - where - thread = NamedThread thisThread - refill [] = noop - refill cs = do - debug thisThread - [ "delaying commit of" - , show (length cs) - , "changes" - ] - refillChanges changechan cs - + where + refill [] = noop + refill cs = do + debug ["delaying commit of", show (length cs), "changes"] + refillChanges cs commitStaged :: Annex Bool commitStaged = do @@ -99,12 +90,12 @@ commitStaged = do - each other out, etc. Git returns nonzero on those, - so don't propigate out commit failures. -} return True - where - nomessage ps - | Git.Version.older "1.7.2" = Param "-m" - : Param "autocommit" : ps - | otherwise = Param "--allow-empty-message" - : Param "-m" : Param "" : ps + where + nomessage ps + | Git.Version.older "1.7.2" = Param "-m" + : Param "autocommit" : ps + | otherwise = Param "--allow-empty-message" + : Param "-m" : Param "" : ps {- Decide if now is a good time to make a commit. 
- Note that the list of change times has an undefined order. @@ -118,9 +109,9 @@ shouldCommit now changes | len > 10000 = True -- avoid bloating queue too much | length (filter thisSecond changes) < 10 = True | otherwise = False -- batch activity - where - len = length changes - thisSecond c = now `diffUTCTime` changeTime c <= 1 + where + len = length changes + thisSecond c = now `diffUTCTime` changeTime c <= 1 {- OSX needs a short delay after a file is added before locking it down, - as pasting a file seems to try to set file permissions or otherwise @@ -152,77 +143,77 @@ delayaddDefault = Nothing - Any pending adds that are not ready yet are put back into the ChangeChan, - where they will be retried later. -} -handleAdds :: Maybe Seconds -> ThreadState -> ChangeChan -> TransferQueue -> DaemonStatusHandle -> [Change] -> IO [Change] -handleAdds delayadd st changechan transferqueue dstatus cs = returnWhen (null incomplete) $ do +handleAdds :: Maybe Seconds -> [Change] -> Assistant [Change] +handleAdds delayadd cs = returnWhen (null incomplete) $ do let (pending, inprocess) = partition isPendingAddChange incomplete pending' <- findnew pending - (postponed, toadd) <- partitionEithers <$> safeToAdd delayadd st pending' inprocess + (postponed, toadd) <- partitionEithers <$> safeToAdd delayadd pending' inprocess unless (null postponed) $ - refillChanges changechan postponed + refillChanges postponed returnWhen (null toadd) $ do added <- catMaybes <$> forM toadd add if DirWatcher.eventsCoalesce || null added then return $ added ++ otherchanges else do - r <- handleAdds delayadd st changechan transferqueue dstatus - =<< getChanges changechan + r <- handleAdds delayadd =<< getChanges return $ r ++ added ++ otherchanges - where - (incomplete, otherchanges) = partition (\c -> isPendingAddChange c || isInProcessAddChange c) cs + where + (incomplete, otherchanges) = partition (\c -> isPendingAddChange c || isInProcessAddChange c) cs - findnew [] = return [] - findnew pending = do - (!newfiles, cleanup) <- runThreadState st $ - inRepo (Git.LsFiles.notInRepo False $ map changeFile pending) - void cleanup - -- note: timestamp info is lost here - let ts = changeTime (pending !! 0) - return $ map (PendingAddChange ts) newfiles + findnew [] = return [] + findnew pending@(exemplar:_) = do + (!newfiles, cleanup) <- liftAnnex $ + inRepo (Git.LsFiles.notInRepo False $ map changeFile pending) + void $ liftIO cleanup + -- note: timestamp info is lost here + let ts = changeTime exemplar + return $ map (PendingAddChange ts) newfiles - returnWhen c a - | c = return otherchanges - | otherwise = a + returnWhen c a + | c = return otherchanges + | otherwise = a - add :: Change -> IO (Maybe Change) - add change@(InProcessAddChange { keySource = ks }) = - alertWhile' dstatus (addFileAlert $ keyFilename ks) $ - liftM ret $ catchMaybeIO $ - sanitycheck ks $ runThreadState st $ do + add :: Change -> Assistant (Maybe Change) + add change@(InProcessAddChange { keySource = ks }) = do + alertWhile' (addFileAlert $ keyFilename ks) $ + liftM ret $ catchMaybeIO <~> do + sanitycheck ks $ do + key <- liftAnnex $ do showStart "add" $ keyFilename ks - key <- Command.Add.ingest ks - done (finishedChange change) (keyFilename ks) key - where - {- Add errors tend to be transient and will - - be automatically dealt with, so don't - - pass to the alert code. 
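{- The batching heuristic in shouldCommit above, restated as a standalone
 - sketch over plain timestamps: commit when the queue has grown very large,
 - or when fewer than 10 of the queued changes happened within the current
 - second; otherwise keep batching. -}

import Data.Time.Clock

shouldCommitNow :: UTCTime -> [UTCTime] -> Bool
shouldCommitNow now changetimes
    | len > 10000 = True  -- avoid bloating the queue too much
    | length (filter thisSecond changetimes) < 10 = True
    | otherwise = False   -- lots of activity this second; keep batching
  where
    len = length changetimes
    thisSecond t = now `diffUTCTime` t <= 1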
-} - ret (Just j@(Just _)) = (True, j) - ret _ = (True, Nothing) - add _ = return Nothing + Command.Add.ingest ks + done (finishedChange change) (keyFilename ks) key + where + {- Add errors tend to be transient and will be automatically + - dealt with, so don't pass to the alert code. -} + ret (Just j@(Just _)) = (True, j) + ret _ = (True, Nothing) + add _ = return Nothing - done _ _ Nothing = do - showEndFail - return Nothing - done change file (Just key) = do - link <- Command.Add.link file key True - when DirWatcher.eventsCoalesce $ do + done _ _ Nothing = do + liftAnnex showEndFail + return Nothing + done change file (Just key) = do + link <- liftAnnex $ Command.Add.link file key True + when DirWatcher.eventsCoalesce $ + liftAnnex $ do sha <- inRepo $ Git.HashObject.hashObject BlobObject link stageSymlink file sha - queueTransfers Next transferqueue dstatus key (Just file) Upload - showEndOk - return $ Just change + showEndOk + queueTransfers Next key (Just file) Upload + return $ Just change - {- Check that the keysource's keyFilename still exists, - - and is still a hard link to its contentLocation, - - before ingesting it. -} - sanitycheck keysource a = do - fs <- getSymbolicLinkStatus $ keyFilename keysource - ks <- getSymbolicLinkStatus $ contentLocation keysource - if deviceID ks == deviceID fs && fileID ks == fileID fs - then a - else return Nothing + {- Check that the keysource's keyFilename still exists, + - and is still a hard link to its contentLocation, + - before ingesting it. -} + sanitycheck keysource a = do + fs <- liftIO $ getSymbolicLinkStatus $ keyFilename keysource + ks <- liftIO $ getSymbolicLinkStatus $ contentLocation keysource + if deviceID ks == deviceID fs && fileID ks == fileID fs + then a + else return Nothing {- Files can Either be Right to be added now, - or are unsafe, and must be Left for later. @@ -230,11 +221,11 @@ handleAdds delayadd st changechan transferqueue dstatus cs = returnWhen (null in - Check by running lsof on the temp directory, which - the KeySources are locked down in. -} -safeToAdd :: Maybe Seconds -> ThreadState -> [Change] -> [Change] -> IO [Either Change Change] -safeToAdd _ _ [] [] = return [] -safeToAdd delayadd st pending inprocess = do - maybe noop threadDelaySeconds delayadd - runThreadState st $ do +safeToAdd :: Maybe Seconds -> [Change] -> [Change] -> Assistant [Either Change Change] +safeToAdd _ [] [] = return [] +safeToAdd delayadd pending inprocess = do + maybe noop (liftIO . 
threadDelaySeconds) delayadd + liftAnnex $ do keysources <- mapM Command.Add.lockDown (map changeFile pending) let inprocess' = map mkinprocess (zip pending keysources) tmpdir <- fromRepo gitAnnexTmpDir @@ -250,25 +241,24 @@ safeToAdd delayadd st pending inprocess = do mapM_ canceladd $ lefts checked allRight $ rights checked else return checked - where - check openfiles change@(InProcessAddChange { keySource = ks }) - | S.member (contentLocation ks) openfiles = Left change - check _ change = Right change + where + check openfiles change@(InProcessAddChange { keySource = ks }) + | S.member (contentLocation ks) openfiles = Left change + check _ change = Right change - mkinprocess (c, ks) = InProcessAddChange - { changeTime = changeTime c - , keySource = ks - } + mkinprocess (c, ks) = InProcessAddChange + { changeTime = changeTime c + , keySource = ks + } - canceladd (InProcessAddChange { keySource = ks }) = do - warning $ keyFilename ks - ++ " still has writers, not adding" - -- remove the hard link - void $ liftIO $ tryIO $ - removeFile $ contentLocation ks - canceladd _ = noop + canceladd (InProcessAddChange { keySource = ks }) = do + warning $ keyFilename ks + ++ " still has writers, not adding" + -- remove the hard link + void $ liftIO $ tryIO $ removeFile $ contentLocation ks + canceladd _ = noop - openwrite (_file, mode, _pid) = - mode == Lsof.OpenWriteOnly || mode == Lsof.OpenReadWrite + openwrite (_file, mode, _pid) = + mode == Lsof.OpenWriteOnly || mode == Lsof.OpenReadWrite - allRight = return . map Right + allRight = return . map Right diff --git a/Assistant/Threads/ConfigMonitor.hs b/Assistant/Threads/ConfigMonitor.hs new file mode 100644 index 0000000..2d012ad --- /dev/null +++ b/Assistant/Threads/ConfigMonitor.hs @@ -0,0 +1,88 @@ +{- git-annex assistant config monitor thread + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.Threads.ConfigMonitor where + +import Assistant.Common +import Assistant.BranchChange +import Assistant.DaemonStatus +import Assistant.Commits +import Utility.ThreadScheduler +import Logs.UUID +import Logs.Trust +import Logs.Remote +import Logs.PreferredContent +import Logs.Group +import Remote.List (remoteListRefresh) +import qualified Git.LsTree as LsTree +import qualified Annex.Branch + +import qualified Data.Set as S + +thisThread :: ThreadName +thisThread = "ConfigMonitor" + +{- This thread detects when configuration changes have been made to the + - git-annex branch and reloads cached configuration. + - + - If the branch is frequently changing, it's checked for configuration + - changes no more often than once every 60 seconds. On the other hand, + - if the branch has not changed in a while, configuration changes will + - be detected immediately. + -} +configMonitorThread :: NamedThread +configMonitorThread = NamedThread "ConfigMonitor" $ loop =<< getConfigs + where + loop old = do + waitBranchChange + new <- getConfigs + when (old /= new) $ do + let changedconfigs = new `S.difference` old + debug $ "reloading config" : + map fst (S.toList changedconfigs) + reloadConfigs new + {- Record a commit to get this config + - change pushed out to remotes. -} + recordCommit + liftIO $ threadDelaySeconds (Seconds 60) + loop new + +{- Config files, and their checksums. -} +type Configs = S.Set (FilePath, String) + +{- All git-annex's config files, and actions to run when they change. 
-} +configFilesActions :: [(FilePath, Annex ())] +configFilesActions = + [ (uuidLog, void $ uuidMapLoad) + , (remoteLog, void remoteListRefresh) + , (trustLog, void trustMapLoad) + , (groupLog, void groupMapLoad) + -- Preferred content settings depend on most of the other configs, + -- so will be reloaded whenever any configs change. + , (preferredContentLog, noop) + ] + +reloadConfigs :: Configs -> Assistant () +reloadConfigs changedconfigs = do + liftAnnex $ do + sequence_ as + void preferredContentMapLoad + {- Changes to the remote log, or the trust log, can affect the + - syncRemotes list -} + when (Logs.Remote.remoteLog `elem` fs || Logs.Trust.trustLog `elem` fs) $ + updateSyncRemotes + where + (fs, as) = unzip $ filter (flip S.member changedfiles . fst) + configFilesActions + changedfiles = S.map fst changedconfigs + +getConfigs :: Assistant Configs +getConfigs = S.fromList . map extract + <$> liftAnnex (inRepo $ LsTree.lsTreeFiles Annex.Branch.fullname files) + where + files = map fst configFilesActions + extract treeitem = (LsTree.file treeitem, LsTree.sha treeitem) diff --git a/Assistant/Threads/DaemonStatus.hs b/Assistant/Threads/DaemonStatus.hs index f3174c8..07f0986 100644 --- a/Assistant/Threads/DaemonStatus.hs +++ b/Assistant/Threads/DaemonStatus.hs @@ -9,28 +9,21 @@ module Assistant.Threads.DaemonStatus where import Assistant.Common import Assistant.DaemonStatus -import Assistant.ThreadedMonad import Utility.ThreadScheduler import Utility.NotificationBroadcaster -thisThread :: ThreadName -thisThread = "DaemonStatus" - {- This writes the daemon status to disk, when it changes, but no more - frequently than once every ten minutes. -} -daemonStatusThread :: ThreadState -> DaemonStatusHandle -> NamedThread -daemonStatusThread st dstatus = thread $ do - notifier <- newNotificationHandle - =<< changeNotifier <$> getDaemonStatus dstatus +daemonStatusThread :: NamedThread +daemonStatusThread = NamedThread "DaemonStatus" $ do + notifier <- liftIO . newNotificationHandle + =<< changeNotifier <$> getDaemonStatus checkpoint - runEvery (Seconds tenMinutes) $ do - waitNotification notifier + runEvery (Seconds tenMinutes) <~> do + liftIO $ waitNotification notifier checkpoint - where - thread = NamedThread thisThread - checkpoint = do - status <- getDaemonStatus dstatus - file <- runThreadState st $ fromRepo gitAnnexDaemonStatusFile - writeDaemonStatusFile file status - + where + checkpoint = do + file <- liftAnnex $ fromRepo gitAnnexDaemonStatusFile + liftIO . writeDaemonStatusFile file =<< getDaemonStatus diff --git a/Assistant/Threads/Merger.hs b/Assistant/Threads/Merger.hs index 46f5162..105f0cc 100644 --- a/Assistant/Threads/Merger.hs +++ b/Assistant/Threads/Merger.hs @@ -8,9 +8,8 @@ module Assistant.Threads.Merger where import Assistant.Common -import Assistant.ThreadedMonad -import Assistant.DaemonStatus import Assistant.TransferQueue +import Assistant.BranchChange import Utility.DirWatcher import Utility.Types.DirWatcher import qualified Annex.Branch @@ -23,36 +22,34 @@ thisThread = "Merger" {- This thread watches for changes to .git/refs/, and handles incoming - pushes. 
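{- A minimal sketch of the change detection used by getConfigs above: each
 - config file is represented as a (path, checksum) pair, and two snapshots
 - are compared as sets, so both edited and newly appearing files show up in
 - the difference. The checksums below are hypothetical stand-ins for the
 - shas reported by git ls-tree. -}

import qualified Data.Set as S

type ConfigSet = S.Set (FilePath, String)

changedConfigFiles :: ConfigSet -> ConfigSet -> [FilePath]
changedConfigFiles old new = map fst $ S.toList $ new `S.difference` old

-- For example:
--   old = S.fromList [("trust.log", "aaaa"), ("remote.log", "bbbb")]
--   new = S.fromList [("trust.log", "aaaa"), ("remote.log", "cccc")]
--   changedConfigFiles old new == ["remote.log"]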
-} -mergeThread :: ThreadState -> DaemonStatusHandle -> TransferQueue -> NamedThread -mergeThread st dstatus transferqueue = thread $ do - g <- runThreadState st gitRepo +mergeThread :: NamedThread +mergeThread = NamedThread "Merger" $ do + g <- liftAnnex gitRepo let dir = Git.localGitDir g "refs" - createDirectoryIfMissing True dir - let hook a = Just $ runHandler st dstatus transferqueue a + liftIO $ createDirectoryIfMissing True dir + let hook a = Just <$> asIO2 (runHandler a) + addhook <- hook onAdd + errhook <- hook onErr let hooks = mkWatchHooks - { addHook = hook onAdd - , errHook = hook onErr + { addHook = addhook + , errHook = errhook } - void $ watchDir dir (const False) hooks id - debug thisThread ["watching", dir] - where - thread = NamedThread thisThread + void $ liftIO $ watchDir dir (const False) hooks id + debug ["watching", dir] -type Handler = ThreadState -> DaemonStatusHandle -> TransferQueue -> FilePath -> Maybe FileStatus -> IO () +type Handler = FilePath -> Assistant () {- Runs an action handler. - - Exceptions are ignored, otherwise a whole thread could be crashed. -} -runHandler :: ThreadState -> DaemonStatusHandle -> TransferQueue -> Handler -> FilePath -> Maybe FileStatus -> IO () -runHandler st dstatus transferqueue handler file filestatus = void $ - either print (const noop) =<< tryIO go - where - go = handler st dstatus transferqueue file filestatus +runHandler :: Handler -> FilePath -> Maybe FileStatus -> Assistant () +runHandler handler file _filestatus = + either (liftIO . print) (const noop) =<< tryIO <~> handler file {- Called when there's an error with inotify. -} onErr :: Handler -onErr _ _ _ msg _ = error msg +onErr msg = error msg {- Called when a new branch ref is written. - @@ -66,39 +63,38 @@ onErr _ _ _ msg _ = error msg - ran are merged in. -} onAdd :: Handler -onAdd st dstatus transferqueue file _ +onAdd file | ".lock" `isSuffixOf` file = noop - | isAnnexBranch file = runThreadState st $ - whenM Annex.Branch.forceUpdate $ - queueDeferredDownloads Later transferqueue dstatus - | "/synced/" `isInfixOf` file = runThreadState st $ do - mergecurrent =<< inRepo Git.Branch.current + | isAnnexBranch file = do + branchChanged + whenM (liftAnnex Annex.Branch.forceUpdate) $ + queueDeferredDownloads Later + | "/synced/" `isInfixOf` file = do + mergecurrent =<< liftAnnex (inRepo Git.Branch.current) | otherwise = noop - where - changedbranch = fileToBranch file - mergecurrent (Just current) - | equivBranches changedbranch current = do - liftIO $ debug thisThread - [ "merging" - , show changedbranch - , "into" - , show current - ] - void $ inRepo $ - Git.Merge.mergeNonInteractive changedbranch - mergecurrent _ = noop + where + changedbranch = fileToBranch file + mergecurrent (Just current) + | equivBranches changedbranch current = do + debug + [ "merging", show changedbranch + , "into", show current + ] + void $ liftAnnex $ inRepo $ + Git.Merge.mergeNonInteractive changedbranch + mergecurrent _ = noop equivBranches :: Git.Ref -> Git.Ref -> Bool equivBranches x y = base x == base y - where - base = takeFileName . show + where + base = takeFileName . 
show isAnnexBranch :: FilePath -> Bool isAnnexBranch f = n `isSuffixOf` f - where - n = "/" ++ show Annex.Branch.name + where + n = "/" ++ show Annex.Branch.name fileToBranch :: FilePath -> Git.Ref fileToBranch f = Git.Ref $ "refs" base - where - base = Prelude.last $ split "/refs/" f + where + base = Prelude.last $ split "/refs/" f diff --git a/Assistant/Threads/MountWatcher.hs b/Assistant/Threads/MountWatcher.hs index 462f584..fa7d4ec 100644 --- a/Assistant/Threads/MountWatcher.hs +++ b/Assistant/Threads/MountWatcher.hs @@ -11,9 +11,7 @@ module Assistant.Threads.MountWatcher where import Assistant.Common -import Assistant.ThreadedMonad import Assistant.DaemonStatus -import Assistant.ScanRemotes import Assistant.Sync import qualified Annex import qualified Git @@ -38,74 +36,80 @@ import qualified Control.Exception as E thisThread :: ThreadName thisThread = "MountWatcher" -mountWatcherThread :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> NamedThread -mountWatcherThread st handle scanremotes = thread $ +mountWatcherThread :: NamedThread +mountWatcherThread = NamedThread "MountWatcher" $ #if WITH_DBUS - dbusThread st handle scanremotes + dbusThread #else - pollingThread st handle scanremotes + pollingThread #endif - where - thread = NamedThread thisThread #if WITH_DBUS -dbusThread :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> IO () -dbusThread st dstatus scanremotes = E.catch (go =<< connectSession) onerr - where - go client = ifM (checkMountMonitor client) - ( do - {- Store the current mount points in an mvar, - - to be compared later. We could in theory - - work out the mount point from the dbus - - message, but this is easier. -} - mvar <- newMVar =<< currentMountPoints - forM_ mountChanged $ \matcher -> - listen client matcher $ \_event -> do - nowmounted <- currentMountPoints - wasmounted <- swapMVar mvar nowmounted - handleMounts st dstatus scanremotes wasmounted nowmounted - , do - runThreadState st $ - warning "No known volume monitor available through dbus; falling back to mtab polling" - pollinstead - ) - onerr :: E.SomeException -> IO () - onerr e = do - runThreadState st $ - warning $ "Failed to use dbus; falling back to mtab polling (" ++ show e ++ ")" - pollinstead - pollinstead = pollingThread st dstatus scanremotes +dbusThread :: Assistant () +dbusThread = do + runclient <- asIO1 go + r <- liftIO $ E.try $ runClient getSessionAddress runclient + either onerr (const noop) r + where + go client = ifM (checkMountMonitor client) + ( do + {- Store the current mount points in an MVar, to be + - compared later. We could in theory work out the + - mount point from the dbus message, but this is + - easier. -} + mvar <- liftIO $ newMVar =<< currentMountPoints + handleevent <- asIO1 $ \_event -> do + nowmounted <- liftIO $ currentMountPoints + wasmounted <- liftIO $ swapMVar mvar nowmounted + handleMounts wasmounted nowmounted + liftIO $ forM_ mountChanged $ \matcher -> + listen client matcher handleevent + , do + liftAnnex $ + warning "No known volume monitor available through dbus; falling back to mtab polling" + pollingThread + ) + onerr :: E.SomeException -> Assistant () + onerr e = do + {- If the session dbus fails, the user probably + - logged out of their desktop. Even if they log + - back in, we won't have access to the dbus + - session key, so polling is the best that can be + - done in this situation. 
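{- A small sketch of how the Merger thread above maps a changed file under
 - .git/refs/ back to a ref, and of why a synced ref is treated as equivalent
 - to the checked-out branch: only the final path component is compared.
 - Git.Ref is replaced by a plain String, and splitOn (from the split
 - package) stands in for the split helper used in the patch. -}

import Data.List.Split (splitOn)
import System.FilePath (takeFileName, (</>))

fileToRef :: FilePath -> String
fileToRef f = "refs" </> last (splitOn "/refs/" f)

equivRefs :: String -> String -> Bool
equivRefs x y = takeFileName x == takeFileName y

-- For example:
--   fileToRef ".git/refs/synced/master"                == "refs/synced/master"
--   equivRefs "refs/synced/master" "refs/heads/master" == True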
-} + liftAnnex $ + warning $ "dbus failed; falling back to mtab polling (" ++ show e ++ ")" + pollingThread {- Examine the list of services connected to dbus, to see if there - are any we can use to monitor mounts. If not, will attempt to start one. -} -checkMountMonitor :: Client -> IO Bool +checkMountMonitor :: Client -> Assistant Bool checkMountMonitor client = do running <- filter (`elem` usableservices) - <$> listServiceNames client + <$> liftIO (listServiceNames client) case running of [] -> startOneService client startableservices (service:_) -> do - debug thisThread [ "Using running DBUS service" + debug [ "Using running DBUS service" , service , "to monitor mount events." ] return True - where - startableservices = [gvfs] - usableservices = startableservices ++ [kde] - gvfs = "org.gtk.Private.GduVolumeMonitor" - kde = "org.kde.DeviceNotifications" + where + startableservices = [gvfs] + usableservices = startableservices ++ [kde] + gvfs = "org.gtk.Private.GduVolumeMonitor" + kde = "org.kde.DeviceNotifications" -startOneService :: Client -> [ServiceName] -> IO Bool +startOneService :: Client -> [ServiceName] -> Assistant Bool startOneService _ [] = return False startOneService client (x:xs) = do - _ <- callDBus client "StartServiceByName" + _ <- liftIO $ callDBus client "StartServiceByName" [toVariant x, toVariant (0 :: Word32)] - ifM (elem x <$> listServiceNames client) + ifM (liftIO $ elem x <$> listServiceNames client) ( do - debug thisThread [ "Started DBUS service" - , x + debug + [ "Started DBUS service", x , "to monitor mount events." ] return True @@ -115,48 +119,47 @@ startOneService client (x:xs) = do {- Filter matching events recieved when drives are mounted and unmounted. -} mountChanged :: [MatchRule] mountChanged = [gvfs True, gvfs False, kde, kdefallback] - where - {- gvfs reliably generates this event whenever a drive is mounted/unmounted, - - whether automatically, or manually -} - gvfs mount = matchAny - { matchInterface = Just "org.gtk.Private.RemoteVolumeMonitor" - , matchMember = Just $ if mount then "MountAdded" else "MountRemoved" - } - {- This event fires when KDE prompts the user what to do with a drive, - - but maybe not at other times. And it's not received -} - kde = matchAny - { matchInterface = Just "org.kde.Solid.Device" - , matchMember = Just "setupDone" - } - {- This event may not be closely related to mounting a drive, but it's - - observed reliably when a drive gets mounted or unmounted. -} - kdefallback = matchAny - { matchInterface = Just "org.kde.KDirNotify" - , matchMember = Just "enteredDirectory" - } + where + {- gvfs reliably generates this event whenever a + - drive is mounted/unmounted, whether automatically, or manually -} + gvfs mount = matchAny + { matchInterface = Just "org.gtk.Private.RemoteVolumeMonitor" + , matchMember = Just $ if mount then "MountAdded" else "MountRemoved" + } + {- This event fires when KDE prompts the user what to do with a drive, + - but maybe not at other times. And it's not received -} + kde = matchAny + { matchInterface = Just "org.kde.Solid.Device" + , matchMember = Just "setupDone" + } + {- This event may not be closely related to mounting a drive, but it's + - observed reliably when a drive gets mounted or unmounted. 
-} + kdefallback = matchAny + { matchInterface = Just "org.kde.KDirNotify" + , matchMember = Just "enteredDirectory" + } #endif -pollingThread :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> IO () -pollingThread st dstatus scanremotes = go =<< currentMountPoints - where - go wasmounted = do - threadDelaySeconds (Seconds 10) - nowmounted <- currentMountPoints - handleMounts st dstatus scanremotes wasmounted nowmounted - go nowmounted - -handleMounts :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> MountPoints -> MountPoints -> IO () -handleMounts st dstatus scanremotes wasmounted nowmounted = - mapM_ (handleMount st dstatus scanremotes . mnt_dir) $ +pollingThread :: Assistant () +pollingThread = go =<< liftIO currentMountPoints + where + go wasmounted = do + liftIO $ threadDelaySeconds (Seconds 10) + nowmounted <- liftIO currentMountPoints + handleMounts wasmounted nowmounted + go nowmounted + +handleMounts :: MountPoints -> MountPoints -> Assistant () +handleMounts wasmounted nowmounted = + mapM_ (handleMount . mnt_dir) $ S.toList $ newMountPoints wasmounted nowmounted -handleMount :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> FilePath -> IO () -handleMount st dstatus scanremotes dir = do - debug thisThread ["detected mount of", dir] - reconnectRemotes thisThread st dstatus scanremotes - =<< filter (Git.repoIsLocal . Remote.repo) - <$> remotesUnder st dstatus dir +handleMount :: FilePath -> Assistant () +handleMount dir = do + debug ["detected mount of", dir] + rs <- filter (Git.repoIsLocal . Remote.repo) <$> remotesUnder dir + reconnectRemotes True rs {- Finds remotes located underneath the mount point. - @@ -166,21 +169,21 @@ handleMount st dstatus scanremotes dir = do - at startup time, or may have changed (it could even be a different - repository at the same remote location..) 
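{- A minimal sketch of the mount handling above: compare the sets of mount
 - points before and after an event to find newly mounted directories, then
 - keep only the remotes whose local path lies under one of them. The remote
 - representation is a stand-in, and a plain prefix test approximates the
 - dirContains check used by remotesUnder. -}

import Data.List (isPrefixOf)
import qualified Data.Set as S

newlyMounted :: S.Set FilePath -> S.Set FilePath -> S.Set FilePath
newlyMounted wasmounted nowmounted = nowmounted `S.difference` wasmounted

-- Remotes are (name, local path if any) pairs in this sketch.
remotesUnderDir :: FilePath -> [(String, Maybe FilePath)] -> [String]
remotesUnderDir dir remotes =
    [ name | (name, Just path) <- remotes, dir `isPrefixOf` path ]

-- For example, after /media/usb appears:
--   remotesUnderDir "/media/usb" [("usbdrive", Just "/media/usb/annex"), ("origin", Nothing)]
--     == ["usbdrive"]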
-} -remotesUnder :: ThreadState -> DaemonStatusHandle -> FilePath -> IO [Remote] -remotesUnder st dstatus dir = runThreadState st $ do - repotop <- fromRepo Git.repoPath - rs <- remoteList - pairs <- mapM (checkremote repotop) rs +remotesUnder :: FilePath -> Assistant [Remote] +remotesUnder dir = do + repotop <- liftAnnex $ fromRepo Git.repoPath + rs <- liftAnnex remoteList + pairs <- liftAnnex $ mapM (checkremote repotop) rs let (waschanged, rs') = unzip pairs when (any id waschanged) $ do - Annex.changeState $ \s -> s { Annex.remotes = rs' } - updateSyncRemotes dstatus + liftAnnex $ Annex.changeState $ \s -> s { Annex.remotes = rs' } + updateSyncRemotes return $ map snd $ filter fst pairs - where - checkremote repotop r = case Remote.localpath r of - Just p | dirContains dir (absPathFrom repotop p) -> - (,) <$> pure True <*> updateRemote r - _ -> return (False, r) + where + checkremote repotop r = case Remote.localpath r of + Just p | dirContains dir (absPathFrom repotop p) -> + (,) <$> pure True <*> updateRemote r + _ -> return (False, r) type MountPoints = S.Set Mntent diff --git a/Assistant/Threads/NetWatcher.hs b/Assistant/Threads/NetWatcher.hs index a8daa94..c5a48ad 100644 --- a/Assistant/Threads/NetWatcher.hs +++ b/Assistant/Threads/NetWatcher.hs @@ -11,9 +11,6 @@ module Assistant.Threads.NetWatcher where import Assistant.Common -import Assistant.ThreadedMonad -import Assistant.DaemonStatus -import Assistant.ScanRemotes import Assistant.Sync import Utility.ThreadScheduler import Remote.List @@ -24,73 +21,72 @@ import Utility.DBus import DBus.Client import DBus import Data.Word (Word32) -import qualified Control.Exception as E +import Assistant.NetMessager #else #warning Building without dbus support; will poll for network connection changes #endif -thisThread :: ThreadName -thisThread = "NetWatcher" - -netWatcherThread :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> NamedThread +netWatcherThread :: NamedThread #if WITH_DBUS -netWatcherThread st dstatus scanremotes = thread $ - dbusThread st dstatus scanremotes +netWatcherThread = thread dbusThread #else -netWatcherThread _ _ _ = thread noop +netWatcherThread = thread noop #endif - where - thread = NamedThread thisThread + where + thread = NamedThread "NetWatcher" {- This is a fallback for when dbus cannot be used to detect - network connection changes, but it also ensures that - any networked remotes that may have not been routable for a - while (despite the local network staying up), are synced with - periodically. 
-} -netWatcherFallbackThread :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> NamedThread -netWatcherFallbackThread st dstatus scanremotes = thread $ - runEvery (Seconds 3600) $ - handleConnection st dstatus scanremotes - where - thread = NamedThread thisThread +netWatcherFallbackThread :: NamedThread +netWatcherFallbackThread = NamedThread "NetWatcherFallback" $ + runEvery (Seconds 3600) <~> handleConnection #if WITH_DBUS -dbusThread :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> IO () -dbusThread st dstatus scanremotes = E.catch (go =<< connectSystem) onerr - where - go client = ifM (checkNetMonitor client) - ( do - listenNMConnections client handle - listenWicdConnections client handle - , do - runThreadState st $ - warning "No known network monitor available through dbus; falling back to polling" - ) - onerr :: E.SomeException -> IO () - onerr e = runThreadState st $ - warning $ "Failed to use dbus; falling back to polling (" ++ show e ++ ")" - handle = do - debug thisThread ["detected network connection"] - handleConnection st dstatus scanremotes +dbusThread :: Assistant () +dbusThread = do + handleerr <- asIO2 onerr + runclient <- asIO1 go + liftIO $ persistentClient getSystemAddress () handleerr runclient + where + go client = ifM (checkNetMonitor client) + ( do + listenNMConnections client <~> handleconn + listenWicdConnections client <~> handleconn + , do + liftAnnex $ + warning "No known network monitor available through dbus; falling back to polling" + ) + handleconn = do + debug ["detected network connection"] + notifyNetMessagerRestart + handleConnection + onerr e _ = do + liftAnnex $ + warning $ "lost dbus connection; falling back to polling (" ++ show e ++ ")" + {- Wait, in hope that dbus will come back -} + liftIO $ threadDelaySeconds (Seconds 60) {- Examine the list of services connected to dbus, to see if there - are any we can use to monitor network connections. -} -checkNetMonitor :: Client -> IO Bool +checkNetMonitor :: Client -> Assistant Bool checkNetMonitor client = do - running <- filter (`elem` [networkmanager, wicd]) + running <- liftIO $ filter (`elem` [networkmanager, wicd]) <$> listServiceNames client case running of [] -> return False (service:_) -> do - debug thisThread [ "Using running DBUS service" + debug [ "Using running DBUS service" , service , "to monitor network connection events." ] return True - where - networkmanager = "org.freedesktop.NetworkManager" - wicd = "org.wicd.daemon" + where + networkmanager = "org.freedesktop.NetworkManager" + wicd = "org.wicd.daemon" {- Listens for new NetworkManager connections. 
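{- A generic sketch of the polling fallback described above, for when no dbus
 - monitor is usable: a plain loop that sleeps for a fixed interval and then
 - re-runs the reconnect action. runEvery from Utility.ThreadScheduler is
 - assumed to behave roughly like this stand-in; the hourly fallback wraps
 - handleConnection this way, lifted out of the Assistant monad via (<~>). -}

import Control.Concurrent (threadDelay)
import Control.Monad (forever)

-- Runs the action every n seconds, forever, sleeping first.
runEverySeconds :: Int -> IO () -> IO ()
runEverySeconds n a = forever $ do
    threadDelay (n * 1000000)
    a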
-} listenNMConnections :: Client -> IO () -> IO () @@ -98,18 +94,18 @@ listenNMConnections client callback = listen client matcher $ \event -> when (Just True == anyM activeconnection (signalBody event)) $ callback - where - matcher = matchAny - { matchInterface = Just "org.freedesktop.NetworkManager.Connection.Active" - , matchMember = Just "PropertiesChanged" - } - nm_connection_activated = toVariant (2 :: Word32) - nm_state_key = toVariant ("State" :: String) - activeconnection v = do - m <- fromVariant v - vstate <- lookup nm_state_key $ dictionaryItems m - state <- fromVariant vstate - return $ state == nm_connection_activated + where + matcher = matchAny + { matchInterface = Just "org.freedesktop.NetworkManager.Connection.Active" + , matchMember = Just "PropertiesChanged" + } + nm_connection_activated = toVariant (2 :: Word32) + nm_state_key = toVariant ("State" :: String) + activeconnection v = do + m <- fromVariant v + vstate <- lookup nm_state_key $ dictionaryItems m + state <- fromVariant vstate + return $ state == nm_connection_activated {- Listens for new Wicd connections. -} listenWicdConnections :: Client -> IO () -> IO () @@ -117,21 +113,19 @@ listenWicdConnections client callback = listen client matcher $ \event -> when (any (== wicd_success) (signalBody event)) $ callback - where - matcher = matchAny - { matchInterface = Just "org.wicd.daemon" - , matchMember = Just "ConnectResultsSent" - } - wicd_success = toVariant ("success" :: String) + where + matcher = matchAny + { matchInterface = Just "org.wicd.daemon" + , matchMember = Just "ConnectResultsSent" + } + wicd_success = toVariant ("success" :: String) #endif -handleConnection :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> IO () -handleConnection st dstatus scanremotes = - reconnectRemotes thisThread st dstatus scanremotes - =<< networkRemotes st +handleConnection :: Assistant () +handleConnection = reconnectRemotes True =<< networkRemotes {- Finds network remotes. -} -networkRemotes :: ThreadState -> IO [Remote] -networkRemotes st = runThreadState st $ +networkRemotes :: Assistant [Remote] +networkRemotes = liftAnnex $ filter (isNothing . 
Remote.localpath) <$> remoteList diff --git a/Assistant/Threads/PairListener.hs b/Assistant/Threads/PairListener.hs index 9875dcb..1f9de09 100644 --- a/Assistant/Threads/PairListener.hs +++ b/Assistant/Threads/PairListener.hs @@ -11,13 +11,12 @@ import Assistant.Common import Assistant.Pairing import Assistant.Pairing.Network import Assistant.Pairing.MakeRemote -import Assistant.ThreadedMonad -import Assistant.ScanRemotes -import Assistant.DaemonStatus import Assistant.WebApp import Assistant.WebApp.Types import Assistant.Alert +import Assistant.DaemonStatus import Utility.ThreadScheduler +import Git import Network.Multicast import Network.Socket @@ -27,118 +26,116 @@ import Data.Char thisThread :: ThreadName thisThread = "PairListener" -pairListenerThread :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> UrlRenderer -> NamedThread -pairListenerThread st dstatus scanremotes urlrenderer = thread $ withSocketsDo $ - runEvery (Seconds 1) $ void $ tryIO $ do - sock <- getsock - go sock [] [] - where - thread = NamedThread thisThread +pairListenerThread :: UrlRenderer -> NamedThread +pairListenerThread urlrenderer = NamedThread "PairListener" $ do + listener <- asIO1 $ go [] [] + liftIO $ withSocketsDo $ + runEvery (Seconds 1) $ void $ tryIO $ + listener =<< getsock + where + {- Note this can crash if there's no network interface, + - or only one like lo that doesn't support multicast. -} + getsock = multicastReceiver (multicastAddress $ IPv4Addr undefined) pairingPort - {- Note this can crash if there's no network interface, - - or only one like lo that doesn't support multicast. -} - getsock = multicastReceiver (multicastAddress $ IPv4Addr undefined) pairingPort - - go sock reqs cache = getmsg sock [] >>= \msg -> case readish msg of - Nothing -> go sock reqs cache - Just m -> do - sane <- checkSane msg - (pip, verified) <- verificationCheck m - =<< (pairingInProgress <$> getDaemonStatus dstatus) - let wrongstage = maybe False (\p -> pairMsgStage m <= inProgressPairStage p) pip - case (wrongstage, sane, pairMsgStage m) of - -- ignore our own messages, and - -- out of order messages - (True, _, _) -> go sock reqs cache - (_, False, _) -> go sock reqs cache - (_, _, PairReq) -> if m `elem` reqs - then go sock reqs (invalidateCache m cache) - else do - pairReqReceived verified dstatus urlrenderer m - go sock (m:take 10 reqs) (invalidateCache m cache) - (_, _, PairAck) -> - pairAckReceived verified pip st dstatus scanremotes m cache - >>= go sock reqs - (_, _, PairDone) -> do - pairDoneReceived verified pip st dstatus scanremotes m - go sock reqs cache + go reqs cache sock = liftIO (getmsg sock []) >>= \msg -> case readish msg of + Nothing -> go reqs cache sock + Just m -> do + sane <- checkSane msg + (pip, verified) <- verificationCheck m + =<< (pairingInProgress <$> getDaemonStatus) + let wrongstage = maybe False (\p -> pairMsgStage m <= inProgressPairStage p) pip + case (wrongstage, sane, pairMsgStage m) of + -- ignore our own messages, and + -- out of order messages + (True, _, _) -> go reqs cache sock + (_, False, _) -> go reqs cache sock + (_, _, PairReq) -> if m `elem` reqs + then go reqs (invalidateCache m cache) sock + else do + pairReqReceived verified urlrenderer m + go (m:take 10 reqs) (invalidateCache m cache) sock + (_, _, PairAck) -> do + cache' <- pairAckReceived verified pip m cache + go reqs cache' sock + (_, _, PairDone) -> do + pairDoneReceived verified pip m + go reqs cache sock - {- As well as verifying the message using the shared secret, - - check its UUID against 
the UUID we have stored. If - - they're the same, someone is sending bogus messages, - - which could be an attempt to brute force the shared - - secret. - -} - verificationCheck m (Just pip) = do - let verified = verifiedPairMsg m pip - let sameuuid = pairUUID (inProgressPairData pip) == pairUUID (pairMsgData m) - if not verified && sameuuid - then do - runThreadState st $ - warning "detected possible pairing brute force attempt; disabled pairing" - stopSending dstatus pip - return (Nothing, False) - else return (Just pip, verified && sameuuid) - verificationCheck _ Nothing = return (Nothing, False) + {- As well as verifying the message using the shared secret, + - check its UUID against the UUID we have stored. If + - they're the same, someone is sending bogus messages, + - which could be an attempt to brute force the shared secret. -} + verificationCheck _ Nothing = return (Nothing, False) + verificationCheck m (Just pip) + | not verified && sameuuid = do + liftAnnex $ warning + "detected possible pairing brute force attempt; disabled pairing" + stopSending pip + return (Nothing, False) + |otherwise = return (Just pip, verified && sameuuid) + where + verified = verifiedPairMsg m pip + sameuuid = pairUUID (inProgressPairData pip) == pairUUID (pairMsgData m) - {- Various sanity checks on the content of the message. -} - checkSane msg - {- Control characters could be used in a - - console poisoning attack. -} - | any isControl msg || any (`elem` "\r\n") msg = do - runThreadState st $ - warning "illegal control characters in pairing message; ignoring" - return False - | otherwise = return True + {- Various sanity checks on the content of the message. -} + checkSane msg + {- Control characters could be used in a + - console poisoning attack. -} + | any isControl msg || any (`elem` "\r\n") msg = do + liftAnnex $ warning + "illegal control characters in pairing message; ignoring" + return False + | otherwise = return True - {- PairReqs invalidate the cache of recently finished pairings. - - This is so that, if a new pairing is started with the - - same secret used before, a bogus PairDone is not sent. -} - invalidateCache msg = filter (not . verifiedPairMsg msg) + {- PairReqs invalidate the cache of recently finished pairings. + - This is so that, if a new pairing is started with the + - same secret used before, a bogus PairDone is not sent. -} + invalidateCache msg = filter (not . verifiedPairMsg msg) - getmsg sock c = do - (msg, n, _) <- recvFrom sock chunksz - if n < chunksz - then return $ c ++ msg - else getmsg sock $ c ++ msg - where - chunksz = 1024 + getmsg sock c = do + (msg, n, _) <- recvFrom sock chunksz + if n < chunksz + then return $ c ++ msg + else getmsg sock $ c ++ msg + where + chunksz = 1024 {- Show an alert when a PairReq is seen. 
-} -pairReqReceived :: Bool -> DaemonStatusHandle -> UrlRenderer -> PairMsg -> IO () -pairReqReceived True _ _ _ = noop -- ignore our own PairReq -pairReqReceived False dstatus urlrenderer msg = do - url <- renderUrl urlrenderer (FinishPairR msg) [] - void $ addAlert dstatus $ pairRequestReceivedAlert repo +pairReqReceived :: Bool -> UrlRenderer -> PairMsg -> Assistant () +pairReqReceived True _ _ = noop -- ignore our own PairReq +pairReqReceived False urlrenderer msg = do + url <- liftIO $ renderUrl urlrenderer (FinishLocalPairR msg) [] + closealert <- asIO1 removeAlert + void $ addAlert $ pairRequestReceivedAlert repo AlertButton { buttonUrl = url , buttonLabel = T.pack "Respond" - , buttonAction = Just $ removeAlert dstatus + , buttonAction = Just closealert } - where - repo = pairRepo msg + where + repo = pairRepo msg {- When a verified PairAck is seen, a host is ready to pair with us, and has - already configured our ssh key. Stop sending PairReqs, finish the pairing, - - and send a single PairDone. - -} -pairAckReceived :: Bool -> Maybe PairingInProgress -> ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> PairMsg -> [PairingInProgress] -> IO [PairingInProgress] -pairAckReceived True (Just pip) st dstatus scanremotes msg cache = do - stopSending dstatus pip - setupAuthorizedKeys msg - finishedPairing st dstatus scanremotes msg (inProgressSshKeyPair pip) - startSending dstatus pip PairDone $ multicastPairMsg + - and send a single PairDone. -} +pairAckReceived :: Bool -> Maybe PairingInProgress -> PairMsg -> [PairingInProgress] -> Assistant [PairingInProgress] +pairAckReceived True (Just pip) msg cache = do + stopSending pip + repodir <- repoPath <$> liftAnnex gitRepo + liftIO $ setupAuthorizedKeys msg repodir + finishedLocalPairing msg (inProgressSshKeyPair pip) + startSending pip PairDone $ multicastPairMsg (Just 1) (inProgressSecret pip) (inProgressPairData pip) return $ pip : take 10 cache {- A stale PairAck might also be seen, after we've finished pairing. - Perhaps our PairDone was not received. To handle this, we keep - a cache of recently finished pairings, and re-send PairDone in - response to stale PairAcks for them. -} -pairAckReceived _ _ _ dstatus _ msg cache = do +pairAckReceived _ _ msg cache = do let pips = filter (verifiedPairMsg msg) cache unless (null pips) $ forM_ pips $ \pip -> - startSending dstatus pip PairDone $ multicastPairMsg + startSending pip PairDone $ multicastPairMsg (Just 1) (inProgressSecret pip) (inProgressPairData pip) return cache @@ -151,9 +148,9 @@ pairAckReceived _ _ _ dstatus _ msg cache = do - entering the secret. Would be better to start a fresh pair request in this - situation. 
-} -pairDoneReceived :: Bool -> Maybe PairingInProgress -> ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> PairMsg -> IO () -pairDoneReceived False _ _ _ _ _ = noop -- not verified -pairDoneReceived True Nothing _ _ _ _ = noop -- not in progress -pairDoneReceived True (Just pip) st dstatus scanremotes msg = do - stopSending dstatus pip - finishedPairing st dstatus scanremotes msg (inProgressSshKeyPair pip) +pairDoneReceived :: Bool -> Maybe PairingInProgress -> PairMsg -> Assistant () +pairDoneReceived False _ _ = noop -- not verified +pairDoneReceived True Nothing _ = noop -- not in progress +pairDoneReceived True (Just pip) msg = do + stopSending pip + finishedLocalPairing msg (inProgressSshKeyPair pip) diff --git a/Assistant/Threads/Pusher.hs b/Assistant/Threads/Pusher.hs index 4f3a2dd..035a454 100644 --- a/Assistant/Threads/Pusher.hs +++ b/Assistant/Threads/Pusher.hs @@ -9,13 +9,12 @@ module Assistant.Threads.Pusher where import Assistant.Common import Assistant.Commits +import Assistant.Types.Commits import Assistant.Pushes import Assistant.Alert -import Assistant.ThreadedMonad import Assistant.DaemonStatus import Assistant.Sync import Utility.ThreadScheduler -import qualified Remote import qualified Types.Remote as Remote import Data.Time.Clock @@ -24,52 +23,37 @@ thisThread :: ThreadName thisThread = "Pusher" {- This thread retries pushes that failed before. -} -pushRetryThread :: ThreadState -> DaemonStatusHandle -> FailedPushMap -> NamedThread -pushRetryThread st dstatus pushmap = thread $ runEvery (Seconds halfhour) $ do +pushRetryThread :: NamedThread +pushRetryThread = NamedThread "PushRetrier" $ runEvery (Seconds halfhour) <~> do -- We already waited half an hour, now wait until there are failed -- pushes to retry. - topush <- getFailedPushesBefore pushmap (fromIntegral halfhour) + topush <- getFailedPushesBefore (fromIntegral halfhour) unless (null topush) $ do - debug thisThread - [ "retrying" - , show (length topush) - , "failed pushes" - ] - now <- getCurrentTime - void $ alertWhile dstatus (pushRetryAlert topush) $ - pushToRemotes thisThread now st (Just pushmap) topush - where - halfhour = 1800 - thread = NamedThread thisThread + debug ["retrying", show (length topush), "failed pushes"] + void $ alertWhile (pushRetryAlert topush) $ do + now <- liftIO $ getCurrentTime + pushToRemotes now True topush + where + halfhour = 1800 {- This thread pushes git commits out to remotes soon after they are made. -} -pushThread :: ThreadState -> DaemonStatusHandle -> CommitChan -> FailedPushMap -> NamedThread -pushThread st dstatus commitchan pushmap = thread $ runEvery (Seconds 2) $ do +pushThread :: NamedThread +pushThread = NamedThread "Pusher" $ runEvery (Seconds 2) <~> do -- We already waited two seconds as a simple rate limiter. -- Next, wait until at least one commit has been made - commits <- getCommits commitchan + commits <- getCommits -- Now see if now's a good time to push. - now <- getCurrentTime - if shouldPush now commits + if shouldPush commits then do - remotes <- filter pushable . syncRemotes - <$> getDaemonStatus dstatus - unless (null remotes) $ - void $ alertWhile dstatus (pushAlert remotes) $ - pushToRemotes thisThread now st (Just pushmap) remotes + remotes <- filter (not . Remote.readonly) + . 
syncGitRemotes <$> getDaemonStatus + unless (null remotes) $ + void $ alertWhile (pushAlert remotes) $ do + now <- liftIO $ getCurrentTime + pushToRemotes now True remotes else do - debug thisThread - [ "delaying push of" - , show (length commits) - , "commits" - ] - refillCommits commitchan commits - where - thread = NamedThread thisThread - pushable r - | Remote.specialRemote r = False - | Remote.readonly r = False - | otherwise = True + debug ["delaying push of", show (length commits), "commits"] + refillCommits commits {- Decide if now is a good time to push to remotes. - @@ -77,7 +61,7 @@ pushThread st dstatus commitchan pushmap = thread $ runEvery (Seconds 2) $ do - already determines batches of changes, so we can't easily determine - batches better. -} -shouldPush :: UTCTime -> [Commit] -> Bool -shouldPush _now commits +shouldPush :: [Commit] -> Bool +shouldPush commits | not (null commits) = True | otherwise = False diff --git a/Assistant/Threads/SanityChecker.hs b/Assistant/Threads/SanityChecker.hs index 9122700..1871b68 100644 --- a/Assistant/Threads/SanityChecker.hs +++ b/Assistant/Threads/SanityChecker.hs @@ -11,60 +11,51 @@ module Assistant.Threads.SanityChecker ( import Assistant.Common import Assistant.DaemonStatus -import Assistant.ThreadedMonad -import Assistant.Changes import Assistant.Alert -import Assistant.TransferQueue import qualified Git.LsFiles import Utility.ThreadScheduler import qualified Assistant.Threads.Watcher as Watcher import Data.Time.Clock.POSIX -thisThread :: ThreadName -thisThread = "SanityChecker" - {- This thread wakes up occasionally to make sure the tree is in good shape. -} -sanityCheckerThread :: ThreadState -> DaemonStatusHandle -> TransferQueue -> ChangeChan -> NamedThread -sanityCheckerThread st dstatus transferqueue changechan = thread $ forever $ do - waitForNextCheck dstatus +sanityCheckerThread :: NamedThread +sanityCheckerThread = NamedThread "SanityChecker" $ forever $ do + waitForNextCheck - debug thisThread ["starting sanity check"] + debug ["starting sanity check"] + void $ alertWhile sanityCheckAlert go + debug ["sanity check complete"] + where + go = do + modifyDaemonStatus_ $ \s -> s { sanityCheckRunning = True } - void $ alertWhile dstatus sanityCheckAlert go - - debug thisThread ["sanity check complete"] - where - thread = NamedThread thisThread - go = do - modifyDaemonStatus_ dstatus $ \s -> s - { sanityCheckRunning = True } + now <- liftIO $ getPOSIXTime -- before check started + r <- either showerr return =<< tryIO <~> check - now <- getPOSIXTime -- before check started - r <- catchIO (check st dstatus transferqueue changechan) - $ \e -> do - runThreadState st $ warning $ show e - return False + modifyDaemonStatus_ $ \s -> s + { sanityCheckRunning = False + , lastSanityCheck = Just now + } - modifyDaemonStatus_ dstatus $ \s -> s - { sanityCheckRunning = False - , lastSanityCheck = Just now - } + return r - return r + showerr e = do + liftAnnex $ warning $ show e + return False {- Only run one check per day, from the time of the last check. 
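 -
 - When the check does run, files whose status changed within the last
 - ten minutes are skipped (the `toonew` guard in `check` below). A
 - standalone paraphrase of that guard, assuming plain POSIXTime values
 - (hypothetical name `tooNew`):
 -
 - > import Data.Time.Clock.POSIX (POSIXTime)
 - >
 - > -- True when the change happened within the last ten minutes.
 - > tooNew :: POSIXTime -> POSIXTime -> Bool
 - > tooNew changetime now = now < changetime + slop
 - >   where
 - >     slop = 10 * 60  -- ten minutes
 -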
-} -waitForNextCheck :: DaemonStatusHandle -> IO () -waitForNextCheck dstatus = do - v <- lastSanityCheck <$> getDaemonStatus dstatus - now <- getPOSIXTime - threadDelaySeconds $ Seconds $ calcdelay now v - where - calcdelay _ Nothing = oneDay - calcdelay now (Just lastcheck) - | lastcheck < now = max oneDay $ - oneDay - truncate (now - lastcheck) - | otherwise = oneDay +waitForNextCheck :: Assistant () +waitForNextCheck = do + v <- lastSanityCheck <$> getDaemonStatus + now <- liftIO getPOSIXTime + liftIO $ threadDelaySeconds $ Seconds $ calcdelay now v + where + calcdelay _ Nothing = oneDay + calcdelay now (Just lastcheck) + | lastcheck < now = max oneDay $ + oneDay - truncate (now - lastcheck) + | otherwise = oneDay oneDay :: Int oneDay = 24 * 60 * 60 @@ -72,29 +63,26 @@ oneDay = 24 * 60 * 60 {- It's important to stay out of the Annex monad as much as possible while - running potentially expensive parts of this check, since remaining in it - will block the watcher. -} -check :: ThreadState -> DaemonStatusHandle -> TransferQueue -> ChangeChan -> IO Bool -check st dstatus transferqueue changechan = do - g <- runThreadState st gitRepo +check :: Assistant Bool +check = do + g <- liftAnnex gitRepo -- Find old unstaged symlinks, and add them to git. - (unstaged, cleanup) <- Git.LsFiles.notInRepo False ["."] g - now <- getPOSIXTime + (unstaged, cleanup) <- liftIO $ Git.LsFiles.notInRepo False ["."] g + now <- liftIO $ getPOSIXTime forM_ unstaged $ \file -> do - ms <- catchMaybeIO $ getSymbolicLinkStatus file + ms <- liftIO $ catchMaybeIO $ getSymbolicLinkStatus file case ms of Just s | toonew (statusChangeTime s) now -> noop - | isSymbolicLink s -> - addsymlink file ms + | isSymbolicLink s -> addsymlink file ms _ -> noop - void cleanup + liftIO $ void cleanup return True - where - toonew timestamp now = now < (realToFrac (timestamp + slop) :: POSIXTime) - slop = fromIntegral tenMinutes - insanity msg = do - runThreadState st $ warning msg - void $ addAlert dstatus $ sanityCheckFixAlert msg - addsymlink file s = do - Watcher.runHandler thisThread st dstatus - transferqueue changechan - Watcher.onAddSymlink file s - insanity $ "found unstaged symlink: " ++ file + where + toonew timestamp now = now < (realToFrac (timestamp + slop) :: POSIXTime) + slop = fromIntegral tenMinutes + insanity msg = do + liftAnnex $ warning msg + void $ addAlert $ sanityCheckFixAlert msg + addsymlink file s = do + Watcher.runHandler Watcher.onAddSymlink file s + insanity $ "found unstaged symlink: " ++ file diff --git a/Assistant/Threads/TransferPoller.hs b/Assistant/Threads/TransferPoller.hs index afead63..9118e9b 100644 --- a/Assistant/Threads/TransferPoller.hs +++ b/Assistant/Threads/TransferPoller.hs @@ -8,7 +8,6 @@ module Assistant.Threads.TransferPoller where import Assistant.Common -import Assistant.ThreadedMonad import Assistant.DaemonStatus import Logs.Transfer import Utility.NotificationBroadcaster @@ -17,46 +16,41 @@ import qualified Assistant.Threads.TransferWatcher as TransferWatcher import Control.Concurrent import qualified Data.Map as M -thisThread :: ThreadName -thisThread = "TransferPoller" - {- This thread polls the status of ongoing transfers, determining how much - of each transfer is complete. 
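 -
 - For downloads this boils down to watching a temp file grow. A rough
 - standalone sketch of such a size poller, in plain IO with a
 - hypothetical callback (assumes the file exists for as long as it is
 - being polled):
 -
 - > import Control.Concurrent (threadDelay)
 - > import Control.Monad (when)
 - > import System.Posix.Files (getFileStatus, fileSize)
 - >
 - > -- Report the file's size whenever it changes, checking twice a second
 - > -- (the same interval the thread below uses).
 - > pollSize :: FilePath -> (Integer -> IO ()) -> IO ()
 - > pollSize f report = go (-1)
 - >   where
 - >     go old = do
 - >         threadDelay 500000  -- 0.5 seconds
 - >         st <- getFileStatus f
 - >         let sz = fromIntegral (fileSize st)
 - >         when (sz /= old) $ report sz
 - >         go sz
 -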
-} -transferPollerThread :: ThreadState -> DaemonStatusHandle -> NamedThread -transferPollerThread st dstatus = thread $ do - g <- runThreadState st gitRepo - tn <- newNotificationHandle =<< - transferNotifier <$> getDaemonStatus dstatus +transferPollerThread :: NamedThread +transferPollerThread = NamedThread "TransferPoller" $ do + g <- liftAnnex gitRepo + tn <- liftIO . newNotificationHandle =<< + transferNotifier <$> getDaemonStatus forever $ do - threadDelay 500000 -- 0.5 seconds - ts <- currentTransfers <$> getDaemonStatus dstatus + liftIO $ threadDelay 500000 -- 0.5 seconds + ts <- currentTransfers <$> getDaemonStatus if M.null ts - then waitNotification tn -- block until transfers running + -- block until transfers running + then liftIO $ waitNotification tn else mapM_ (poll g) $ M.toList ts - where - thread = NamedThread thisThread - poll g (t, info) - {- Downloads are polled by checking the size of the - - temp file being used for the transfer. -} - | transferDirection t == Download = do - let f = gitAnnexTmpLocation (transferKey t) g - sz <- catchMaybeIO $ - fromIntegral . fileSize - <$> getFileStatus f - newsize t info sz - {- Uploads don't need to be polled for when the - - TransferWatcher thread can track file - - modifications. -} - | TransferWatcher.watchesTransferSize = noop - {- Otherwise, this code polls the upload progress - - by reading the transfer info file. -} - | otherwise = do - let f = transferFile t g - mi <- catchDefaultIO Nothing $ - readTransferInfoFile Nothing f - maybe noop (newsize t info . bytesComplete) mi - newsize t info sz - | bytesComplete info /= sz && isJust sz = - alterTransferInfo dstatus t $ - \i -> i { bytesComplete = sz } - | otherwise = noop + where + poll g (t, info) + {- Downloads are polled by checking the size of the + - temp file being used for the transfer. -} + | transferDirection t == Download = do + let f = gitAnnexTmpLocation (transferKey t) g + sz <- liftIO $ catchMaybeIO $ + fromIntegral . fileSize <$> getFileStatus f + newsize t info sz + {- Uploads don't need to be polled for when the TransferWatcher + - thread can track file modifications. -} + | TransferWatcher.watchesTransferSize = noop + {- Otherwise, this code polls the upload progress + - by reading the transfer info file. -} + | otherwise = do + let f = transferFile t g + mi <- liftIO $ catchDefaultIO Nothing $ + readTransferInfoFile Nothing f + maybe noop (newsize t info . 
bytesComplete) mi + + newsize t info sz + | bytesComplete info /= sz && isJust sz = + alterTransferInfo t $ \i -> i { bytesComplete = sz } + | otherwise = noop diff --git a/Assistant/Threads/TransferScanner.hs b/Assistant/Threads/TransferScanner.hs index bc58375..918a266 100644 --- a/Assistant/Threads/TransferScanner.hs +++ b/Assistant/Threads/TransferScanner.hs @@ -8,11 +8,12 @@ module Assistant.Threads.TransferScanner where import Assistant.Common +import Assistant.Types.ScanRemotes import Assistant.ScanRemotes import Assistant.TransferQueue -import Assistant.ThreadedMonad import Assistant.DaemonStatus import Assistant.Alert +import Assistant.Drop import Logs.Transfer import Logs.Location import Logs.Web (webUUID) @@ -20,116 +21,120 @@ import qualified Remote import qualified Types.Remote as Remote import Utility.ThreadScheduler import qualified Git.LsFiles as LsFiles -import Command +import qualified Backend import Annex.Content import Annex.Wanted import qualified Data.Set as S -thisThread :: ThreadName -thisThread = "TransferScanner" - {- This thread waits until a remote needs to be scanned, to find transfers - that need to be made, to keep data in sync. -} -transferScannerThread :: ThreadState -> DaemonStatusHandle -> ScanRemoteMap -> TransferQueue -> NamedThread -transferScannerThread st dstatus scanremotes transferqueue = thread $ do +transferScannerThread :: NamedThread +transferScannerThread = NamedThread "TransferScanner" $ do startupScan go S.empty - where - thread = NamedThread thisThread - go scanned = do - threadDelaySeconds (Seconds 2) - (rs, infos) <- unzip <$> getScanRemote scanremotes - if any fullScan infos || any (`S.notMember` scanned) rs - then do - expensiveScan st dstatus transferqueue rs - go $ scanned `S.union` S.fromList rs - else do - mapM_ (failedTransferScan st dstatus transferqueue) rs - go scanned - {- All available remotes are scanned in full on startup, - - for multiple reasons, including: - - - - * This may be the first run, and there may be remotes - - already in place, that need to be synced. - - * We may have run before, and scanned a remote, but - - only been in a subdirectory of the git remote, and so - - not synced it all. - - * We may have run before, and had transfers queued, - - and then the system (or us) crashed, and that info was - - lost. - -} - startupScan = addScanRemotes scanremotes True - =<< syncRemotes <$> getDaemonStatus dstatus + where + go scanned = do + liftIO $ threadDelaySeconds (Seconds 2) + (rs, infos) <- unzip <$> getScanRemote + if any fullScan infos || any (`S.notMember` scanned) rs + then do + expensiveScan rs + go $ scanned `S.union` S.fromList rs + else do + mapM_ failedTransferScan rs + go scanned + {- All available remotes are scanned in full on startup, + - for multiple reasons, including: + - + - * This may be the first run, and there may be remotes + - already in place, that need to be synced. + - * We may have run before, and scanned a remote, but + - only been in a subdirectory of the git remote, and so + - not synced it all. + - * We may have run before, and had transfers queued, + - and then the system (or us) crashed, and that info was + - lost. + -} + startupScan = addScanRemotes True =<< syncDataRemotes <$> getDaemonStatus {- This is a cheap scan for failed transfers involving a remote. 
-} -failedTransferScan :: ThreadState -> DaemonStatusHandle -> TransferQueue -> Remote -> IO () -failedTransferScan st dstatus transferqueue r = do - failed <- runThreadState st $ getFailedTransfers (Remote.uuid r) - runThreadState st $ mapM_ removeFailedTransfer $ map fst failed +failedTransferScan :: Remote -> Assistant () +failedTransferScan r = do + failed <- liftAnnex $ getFailedTransfers (Remote.uuid r) + liftAnnex $ mapM_ removeFailedTransfer $ map fst failed mapM_ retry failed - where - retry (t, info) - | transferDirection t == Download = do - {- Check if the remote still has the key. - - If not, relies on the expensiveScan to - - get it queued from some other remote. -} - whenM (runThreadState st $ remoteHas r $ transferKey t) $ - requeue t info - | otherwise = do - {- The Transferrer checks when uploading - - that the remote doesn't already have the - - key, so it's not redundantly checked - - here. -} + where + retry (t, info) + | transferDirection t == Download = do + {- Check if the remote still has the key. + - If not, relies on the expensiveScan to + - get it queued from some other remote. -} + whenM (liftAnnex $ remoteHas r $ transferKey t) $ requeue t info - requeue t info = queueTransferWhenSmall - transferqueue dstatus (associatedFile info) t r + | otherwise = do + {- The Transferrer checks when uploading + - that the remote doesn't already have the + - key, so it's not redundantly checked here. -} + requeue t info + requeue t info = queueTransferWhenSmall (associatedFile info) t r {- This is a expensive scan through the full git work tree, finding - - files to download from or upload to any known remote. - - - - The scan is blocked when the transfer queue gets too large. -} -expensiveScan :: ThreadState -> DaemonStatusHandle -> TransferQueue -> [Remote] -> IO () -expensiveScan st dstatus transferqueue rs = unless onlyweb $ do - liftIO $ debug thisThread ["starting scan of", show visiblers] - void $ alertWhile dstatus (scanAlert visiblers) $ do - g <- runThreadState st gitRepo - (files, cleanup) <- LsFiles.inRepo [] g - go files - void cleanup + - files to transfer. The scan is blocked when the transfer queue gets + - too large. + - + - This also finds files that are present either here or on a remote + - but that are not preferred content, and drops them. Searching for files + - to drop is done concurrently with the scan for transfers. + - + - TODO: It would be better to first drop as much as we can, before + - transferring much, to minimise disk use. + -} +expensiveScan :: [Remote] -> Assistant () +expensiveScan rs = unless onlyweb $ do + debug ["starting scan of", show visiblers] + void $ alertWhile (scanAlert visiblers) $ do + g <- liftAnnex gitRepo + (files, cleanup) <- liftIO $ LsFiles.inRepo [] g + forM_ files $ \f -> do + ts <- maybe (return []) (findtransfers f) + =<< liftAnnex (Backend.lookupFile f) + mapM_ (enqueue f) ts + void $ liftIO cleanup return True - liftIO $ debug thisThread ["finished scan of", show visiblers] - where - onlyweb = all (== webUUID) $ map Remote.uuid rs - visiblers = let rs' = filter (not . 
Remote.readonly) rs - in if null rs' then rs else rs' - go [] = noop - go (f:fs) = do - mapM_ (enqueue f) =<< runThreadState st - (ifAnnexed f (findtransfers f) $ return []) - go fs - enqueue f (r, t) = do - debug thisThread ["queuing", show t] - queueTransferWhenSmall transferqueue dstatus (Just f) t r - findtransfers f (key, _) = do + debug ["finished scan of", show visiblers] + where + onlyweb = all (== webUUID) $ map Remote.uuid rs + visiblers = let rs' = filter (not . Remote.readonly) rs + in if null rs' then rs else rs' + enqueue f (r, t) = do + debug ["queuing", show t] + queueTransferWhenSmall (Just f) t r + findtransfers f (key, _) = do + {- The syncable remotes may have changed since this + - scan began. -} + syncrs <- syncDataRemotes <$> getDaemonStatus + liftAnnex $ do locs <- loggedLocations key - {- Queue transfers from any known remote. The known - - remotes may have changed since this scan began. -} - let use a = do - syncrs <- liftIO $ syncRemotes <$> getDaemonStatus dstatus - return $ catMaybes $ map (a key locs) syncrs - ifM (inAnnex key) - ( filterM (wantSend (Just f) . Remote.uuid . fst) - =<< use (check Upload False) - , ifM (wantGet $ Just f) - ( use (check Download True) , return [] ) - ) - check direction want key locs r - | direction == Upload && Remote.readonly r = Nothing - | (Remote.uuid r `elem` locs) == want = Just - (r, Transfer direction (Remote.uuid r) key) - | otherwise = Nothing + present <- inAnnex key + + handleDrops' locs syncrs present key (Just f) + + let slocs = S.fromList locs + let use a = return $ catMaybes $ map (a key slocs) syncrs + if present + then filterM (wantSend (Just f) . Remote.uuid . fst) + =<< use (genTransfer Upload False) + else ifM (wantGet $ Just f) + ( use (genTransfer Download True) , return [] ) + +genTransfer :: Direction -> Bool -> Key -> S.Set UUID -> Remote -> Maybe (Remote, Transfer) +genTransfer direction want key slocs r + | direction == Upload && Remote.readonly r = Nothing + | (S.member (Remote.uuid r) slocs) == want = Just + (r, Transfer direction (Remote.uuid r) key) + | otherwise = Nothing remoteHas :: Remote -> Key -> Annex Bool remoteHas r key = elem diff --git a/Assistant/Threads/TransferWatcher.hs b/Assistant/Threads/TransferWatcher.hs index a54128c..7deafb1 100644 --- a/Assistant/Threads/TransferWatcher.hs +++ b/Assistant/Threads/TransferWatcher.hs @@ -8,84 +8,78 @@ module Assistant.Threads.TransferWatcher where import Assistant.Common -import Assistant.ThreadedMonad import Assistant.DaemonStatus import Assistant.TransferQueue +import Assistant.Drop import Annex.Content import Logs.Transfer import Utility.DirWatcher import Utility.Types.DirWatcher import qualified Remote -thisThread :: ThreadName -thisThread = "TransferWatcher" +import Control.Concurrent {- This thread watches for changes to the gitAnnexTransferDir, - and updates the DaemonStatus's map of ongoing transfers. 
-} -transferWatcherThread :: ThreadState -> DaemonStatusHandle -> TransferQueue -> NamedThread -transferWatcherThread st dstatus transferqueue = thread $ do - g <- runThreadState st gitRepo - let dir = gitAnnexTransferDir g - createDirectoryIfMissing True dir - let hook a = Just $ runHandler st dstatus transferqueue a +transferWatcherThread :: NamedThread +transferWatcherThread = NamedThread "TransferWatcher" $ do + dir <- liftAnnex $ gitAnnexTransferDir <$> gitRepo + liftIO $ createDirectoryIfMissing True dir + let hook a = Just <$> asIO2 (runHandler a) + addhook <- hook onAdd + delhook <- hook onDel + modifyhook <- hook onModify + errhook <- hook onErr let hooks = mkWatchHooks - { addHook = hook onAdd - , delHook = hook onDel - , modifyHook = hook onModify - , errHook = hook onErr + { addHook = addhook + , delHook = delhook + , modifyHook = modifyhook + , errHook = errhook } - void $ watchDir dir (const False) hooks id - debug thisThread ["watching for transfers"] - where - thread = NamedThread thisThread + void $ liftIO $ watchDir dir (const False) hooks id + debug ["watching for transfers"] -type Handler = ThreadState -> DaemonStatusHandle -> TransferQueue -> FilePath -> Maybe FileStatus -> IO () +type Handler = FilePath -> Assistant () {- Runs an action handler. - - Exceptions are ignored, otherwise a whole thread could be crashed. -} -runHandler :: ThreadState -> DaemonStatusHandle -> TransferQueue -> Handler -> FilePath -> Maybe FileStatus -> IO () -runHandler st dstatus transferqueue handler file filestatus = void $ - either print (const noop) =<< tryIO go - where - go = handler st dstatus transferqueue file filestatus +runHandler :: Handler -> FilePath -> Maybe FileStatus -> Assistant () +runHandler handler file _filestatus = + either (liftIO . print) (const noop) =<< tryIO <~> handler file {- Called when there's an error with inotify. -} onErr :: Handler -onErr _ _ _ msg _ = error msg +onErr msg = error msg {- Called when a new transfer information file is written. -} onAdd :: Handler -onAdd st dstatus _ file _ = case parseTransferFile file of +onAdd file = case parseTransferFile file of Nothing -> noop - Just t -> go t =<< runThreadState st (checkTransfer t) - where - go _ Nothing = noop -- transfer already finished - go t (Just info) = do - debug thisThread - [ "transfer starting:" - , show t - ] - r <- headMaybe . filter (sameuuid t) - <$> runThreadState st Remote.remoteList - updateTransferInfo dstatus t info - { transferRemote = r } - sameuuid t r = Remote.uuid r == transferUUID t + Just t -> go t =<< liftAnnex (checkTransfer t) + where + go _ Nothing = noop -- transfer already finished + go t (Just info) = do + debug [ "transfer starting:", show t] + r <- headMaybe . filter (sameuuid t) + <$> liftAnnex Remote.remoteList + updateTransferInfo t info { transferRemote = r } + sameuuid t r = Remote.uuid r == transferUUID t {- Called when a transfer information file is updated. - - The only thing that should change in the transfer info is the - bytesComplete, so that's the only thing updated in the DaemonStatus. 
-} onModify :: Handler -onModify _ dstatus _ file _ = do +onModify file = do case parseTransferFile file of Nothing -> noop - Just t -> go t =<< readTransferInfoFile Nothing file - where - go _ Nothing = noop - go t (Just newinfo) = alterTransferInfo dstatus t $ \info -> - info { bytesComplete = bytesComplete newinfo } + Just t -> go t =<< liftIO (readTransferInfoFile Nothing file) + where + go _ Nothing = noop + go t (Just newinfo) = alterTransferInfo t $ + \i -> i { bytesComplete = bytesComplete newinfo } {- This thread can only watch transfer sizes when the DirWatcher supports - tracking modificatons to files. -} @@ -94,24 +88,36 @@ watchesTransferSize = modifyTracked {- Called when a transfer information file is removed. -} onDel :: Handler -onDel st dstatus transferqueue file _ = case parseTransferFile file of +onDel file = case parseTransferFile file of Nothing -> noop Just t -> do - debug thisThread - [ "transfer finishing:" - , show t - ] - minfo <- removeTransfer dstatus t + debug [ "transfer finishing:", show t] + minfo <- removeTransfer t + + finished <- asIO2 finishedTransfer + void $ liftIO $ forkIO $ do + {- XXX race workaround delay. The location + - log needs to be updated before finishedTransfer + - runs. -} + threadDelay 10000000 -- 10 seconds + finished t minfo + +{- Queue uploads of files we successfully downloaded, spreading them + - out to other reachable remotes. + - + - Downloading a file may have caused a remote to not want it; + - so drop it from the remote. + - + - Uploading a file may cause the local repo, or some other remote to not + - want it; handle that too. + -} +finishedTransfer :: Transfer -> Maybe TransferInfo -> Assistant () +finishedTransfer t (Just info) + | transferDirection t == Download = + whenM (liftAnnex $ inAnnex $ transferKey t) $ do + handleDrops False (transferKey t) (associatedFile info) + queueTransfersMatching (/= transferUUID t) Later + (transferKey t) (associatedFile info) Upload + | otherwise = handleDrops True (transferKey t) (associatedFile info) +finishedTransfer _ _ = noop - {- Queue uploads of files we successfully downloaded, - - spreading them out to other reachable remotes. -} - case (minfo, transferDirection t) of - (Just info, Download) -> runThreadState st $ - whenM (inAnnex $ transferKey t) $ - queueTransfersMatching - (/= transferUUID t) - Later transferqueue dstatus - (transferKey t) - (associatedFile info) - Upload - _ -> noop diff --git a/Assistant/Threads/Transferrer.hs b/Assistant/Threads/Transferrer.hs index fe06d5f..1d23487 100644 --- a/Assistant/Threads/Transferrer.hs +++ b/Assistant/Threads/Transferrer.hs @@ -8,11 +8,11 @@ module Assistant.Threads.Transferrer where import Assistant.Common -import Assistant.ThreadedMonad import Assistant.DaemonStatus import Assistant.TransferQueue import Assistant.TransferSlots import Assistant.Alert +import Assistant.Commits import Logs.Transfer import Logs.Location import Annex.Content @@ -22,67 +22,71 @@ import Locations.UserConfig import System.Process (create_group) -thisThread :: ThreadName -thisThread = "Transferrer" - {- For now only one transfer is run at a time. -} maxTransfers :: Int maxTransfers = 1 {- Dispatches transfers from the queue. 
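 -
 - Only maxTransfers of these run at once (one, at present). The real
 - slot machinery lives in Assistant.TransferSlots; the core idea can be
 - sketched with a counting semaphore (hypothetical helpers `newSlots`
 - and `inSlot`):
 -
 - > import Control.Concurrent.QSem
 - > import Control.Exception (bracket_)
 - >
 - > -- Allow at most n actions to run at a time; a slot is held for the
 - > -- action's whole duration and released even if it throws.
 - > newSlots :: Int -> IO QSem
 - > newSlots = newQSem
 - >
 - > inSlot :: QSem -> IO a -> IO a
 - > inSlot sem = bracket_ (waitQSem sem) (signalQSem sem)
 -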
-} -transfererThread :: ThreadState -> DaemonStatusHandle -> TransferQueue -> TransferSlots -> NamedThread -transfererThread st dstatus transferqueue slots = thread $ go =<< readProgramFile - where - thread = NamedThread thisThread - go program = forever $ inTransferSlot dstatus slots $ - maybe (return Nothing) (uncurry $ startTransfer st dstatus program) - =<< getNextTransfer transferqueue dstatus notrunning - {- Skip transfers that are already running. -} - notrunning = isNothing . startedTime +transfererThread :: NamedThread +transfererThread = NamedThread "Transferr" $ do + program <- liftIO readProgramFile + forever $ inTransferSlot $ + maybe (return Nothing) (uncurry $ startTransfer program) + =<< getNextTransfer notrunning + where + {- Skip transfers that are already running. -} + notrunning = isNothing . startedTime {- By the time this is called, the daemonstatus's transfer map should - already have been updated to include the transfer. -} -startTransfer :: ThreadState -> DaemonStatusHandle -> FilePath -> Transfer -> TransferInfo -> TransferGenerator -startTransfer st dstatus program t info = case (transferRemote info, associatedFile info) of - (Just remote, Just file) -> ifM (runThreadState st $ shouldTransfer t info) +startTransfer :: FilePath -> Transfer -> TransferInfo -> Assistant (Maybe (Transfer, TransferInfo, Assistant ())) +startTransfer program t info = case (transferRemote info, associatedFile info) of + (Just remote, Just file) -> ifM (liftAnnex $ shouldTransfer t info) ( do - debug thisThread [ "Transferring:" , show t ] - notifyTransfer dstatus + debug [ "Transferring:" , show t ] + notifyTransfer return $ Just (t, info, transferprocess remote file) , do - debug thisThread [ "Skipping unnecessary transfer:" , show t ] - void $ removeTransfer dstatus t + debug [ "Skipping unnecessary transfer:" , show t ] + void $ removeTransfer t return Nothing ) _ -> return Nothing - where - direction = transferDirection t - isdownload = direction == Download + where + direction = transferDirection t + isdownload = direction == Download - transferprocess remote file = void $ do - (_, _, _, pid) - <- createProcess (proc program $ toCommand params) - { create_group = True } - {- Alerts are only shown for successful transfers. - - Transfers can temporarily fail for many reasons, - - so there's no point in bothering the user about - - those. The assistant should recover. -} - whenM ((==) ExitSuccess <$> waitForProcess pid) $ void $ - addAlert dstatus $ - makeAlertFiller True $ - transferFileAlert direction True file - where - params = - [ Param "transferkey" - , Param "--quiet" - , Param $ key2file $ transferKey t - , Param $ if isdownload - then "--from" - else "--to" - , Param $ Remote.name remote - , Param "--file" - , File file - ] + transferprocess remote file = void $ do + (_, _, _, pid) + <- liftIO $ createProcess (proc program $ toCommand params) + { create_group = True } + {- Alerts are only shown for successful transfers. + - Transfers can temporarily fail for many reasons, + - so there's no point in bothering the user about + - those. The assistant should recover. + - + - Also, after a successful transfer, the location + - log has changed. Indicate that a commit has been + - made, in order to queue a push of the git-annex + - branch out to remotes that did not participate + - in the transfer. 
+ -} + whenM (liftIO $ (==) ExitSuccess <$> waitForProcess pid) $ do + void $ addAlert $ makeAlertFiller True $ + transferFileAlert direction True file + recordCommit + where + params = + [ Param "transferkey" + , Param "--quiet" + , Param $ key2file $ transferKey t + , Param $ if isdownload + then "--from" + else "--to" + , Param $ Remote.name remote + , Param "--file" + , File file + ] {- Checks if the file to download is already present, or the remote - being uploaded to isn't known to have the file. -} @@ -100,5 +104,5 @@ shouldTransfer t info notElem (Remote.uuid remote) <$> loggedLocations key | otherwise = return False - where - key = transferKey t + where + key = transferKey t diff --git a/Assistant/Threads/Watcher.hs b/Assistant/Threads/Watcher.hs index 1bf9e85..a74976d 100644 --- a/Assistant/Threads/Watcher.hs +++ b/Assistant/Threads/Watcher.hs @@ -15,11 +15,12 @@ module Assistant.Threads.Watcher ( ) where import Assistant.Common -import Assistant.ThreadedMonad import Assistant.DaemonStatus import Assistant.Changes +import Assistant.Types.Changes import Assistant.TransferQueue import Assistant.Alert +import Assistant.Drop import Logs.Transfer import Utility.DirWatcher import Utility.Types.DirWatcher @@ -36,9 +37,6 @@ import Git.Types import Data.Bits.Utils import qualified Data.ByteString.Lazy as L -thisThread :: ThreadName -thisThread = "Watcher" - checkCanWatch :: Annex () checkCanWatch | canWatch = @@ -54,115 +52,120 @@ needLsof = error $ unlines , "Be warned: This can corrupt data in the annex, and make fsck complain." ] -watchThread :: ThreadState -> DaemonStatusHandle -> TransferQueue -> ChangeChan -> NamedThread -watchThread st dstatus transferqueue changechan = NamedThread thisThread $ do - void $ watchDir "." ignored hooks startup - debug thisThread [ "watching", "."] - where - startup = startupScan st dstatus - hook a = Just $ runHandler thisThread st dstatus transferqueue changechan a - hooks = mkWatchHooks - { addHook = hook onAdd - , delHook = hook onDel - , addSymlinkHook = hook onAddSymlink - , delDirHook = hook onDelDir - , errHook = hook onErr - } +watchThread :: NamedThread +watchThread = NamedThread "Watcher" $ do + startup <- asIO1 startupScan + addhook <- hook onAdd + delhook <- hook onDel + addsymlinkhook <- hook onAddSymlink + deldirhook <- hook onDelDir + errhook <- hook onErr + let hooks = mkWatchHooks + { addHook = addhook + , delHook = delhook + , addSymlinkHook = addsymlinkhook + , delDirHook = deldirhook + , errHook = errhook + } + void $ liftIO $ watchDir "." ignored hooks startup + debug [ "watching", "."] + where + hook a = Just <$> asIO2 (runHandler a) {- Initial scartup scan. The action should return once the scan is complete. -} -startupScan :: ThreadState -> DaemonStatusHandle -> IO a -> IO a -startupScan st dstatus scanner = do - runThreadState st $ showAction "scanning" - alertWhile' dstatus startupScanAlert $ do - r <- scanner +startupScan :: IO a -> Assistant a +startupScan scanner = do + liftAnnex $ showAction "scanning" + alertWhile' startupScanAlert $ do + r <- liftIO $ scanner -- Notice any files that were deleted before -- watching was started. - runThreadState st $ do + liftAnnex $ do inRepo $ Git.Command.run "add" [Param "--update"] showAction "started" - modifyDaemonStatus_ dstatus $ \s -> s { scanComplete = True } + modifyDaemonStatus_ $ \s -> s { scanComplete = True } return (True, r) ignored :: FilePath -> Bool ignored = ig . 
takeFileName - where + where ig ".git" = True ig ".gitignore" = True ig ".gitattributes" = True ig _ = False -type Handler = ThreadName -> FilePath -> Maybe FileStatus -> DaemonStatusHandle -> TransferQueue -> Annex (Maybe Change) +type Handler = FilePath -> Maybe FileStatus -> Assistant (Maybe Change) -{- Runs an action handler, inside the Annex monad, and if there was a - - change, adds it to the ChangeChan. +{- Runs an action handler, and if there was a change, adds it to the ChangeChan. - - Exceptions are ignored, otherwise a whole watcher thread could be crashed. -} -runHandler :: ThreadName -> ThreadState -> DaemonStatusHandle -> TransferQueue -> ChangeChan -> Handler -> FilePath -> Maybe FileStatus -> IO () -runHandler threadname st dstatus transferqueue changechan handler file filestatus = void $ do - r <- tryIO go +runHandler :: Handler -> FilePath -> Maybe FileStatus -> Assistant () +runHandler handler file filestatus = void $ do + r <- tryIO <~> handler file filestatus case r of - Left e -> print e + Left e -> liftIO $ print e Right Nothing -> noop - Right (Just change) -> recordChange changechan change - where - go = runThreadState st $ handler threadname file filestatus dstatus transferqueue + Right (Just change) -> do + -- Just in case the commit thread is not + -- flushing the queue fast enough. + liftAnnex $ Annex.Queue.flushWhenFull + recordChange change onAdd :: Handler -onAdd _ file filestatus _ _ +onAdd file filestatus | maybe False isRegularFile filestatus = pendingAddChange file | otherwise = noChange - where {- A symlink might be an arbitrary symlink, which is just added. - Or, if it is a git-annex symlink, ensure it points to the content - before adding it. -} onAddSymlink :: Handler -onAddSymlink threadname file filestatus dstatus transferqueue = go =<< Backend.lookupFile file - where - go (Just (key, _)) = do - link <- calcGitLink file key - ifM ((==) link <$> liftIO (readSymbolicLink file)) - ( do - s <- liftIO $ getDaemonStatus dstatus - checkcontent key s - ensurestaged link s - , do - liftIO $ debug threadname ["fix symlink", file] - liftIO $ removeFile file - liftIO $ createSymbolicLink link file - addlink link - ) - go Nothing = do -- other symlink - link <- liftIO (readSymbolicLink file) - ensurestaged link =<< liftIO (getDaemonStatus dstatus) - - {- This is often called on symlinks that are already - - staged correctly. A symlink may have been deleted - - and being re-added, or added when the watcher was - - not running. So they're normally restaged to make sure. - - - - As an optimisation, during the status scan, avoid - - restaging everything. Only links that were created since - - the last time the daemon was running are staged. - - (If the daemon has never ran before, avoid staging - - links too.) - -} - ensurestaged link daemonstatus - | scanComplete daemonstatus = addlink link - | otherwise = case filestatus of - Just s - | not (afterLastDaemonRun (statusChangeTime s) daemonstatus) -> noChange - _ -> addlink link - - {- For speed, tries to reuse the existing blob for - - the symlink target. 
-} - addlink link = do - liftIO $ debug threadname ["add symlink", file] +onAddSymlink file filestatus = go =<< liftAnnex (Backend.lookupFile file) + where + go (Just (key, _)) = do + link <- liftAnnex $ calcGitLink file key + ifM ((==) link <$> liftIO (readSymbolicLink file)) + ( do + s <- getDaemonStatus + checkcontent key s + ensurestaged link s + , do + liftIO $ removeFile file + liftIO $ createSymbolicLink link file + checkcontent key =<< getDaemonStatus + addlink link + ) + go Nothing = do -- other symlink + link <- liftIO (readSymbolicLink file) + ensurestaged link =<< getDaemonStatus + + {- This is often called on symlinks that are already + - staged correctly. A symlink may have been deleted + - and being re-added, or added when the watcher was + - not running. So they're normally restaged to make sure. + - + - As an optimisation, during the startup scan, avoid + - restaging everything. Only links that were created since + - the last time the daemon was running are staged. + - (If the daemon has never ran before, avoid staging + - links too.) + -} + ensurestaged link daemonstatus + | scanComplete daemonstatus = addlink link + | otherwise = case filestatus of + Just s + | not (afterLastDaemonRun (statusChangeTime s) daemonstatus) -> noChange + _ -> addlink link + + {- For speed, tries to reuse the existing blob for symlink target. -} + addlink link = do + debug ["add symlink", file] + liftAnnex $ do v <- catObjectDetails $ Ref $ ':':file case v of Just (currlink, sha) @@ -172,21 +175,24 @@ onAddSymlink threadname file filestatus dstatus transferqueue = go =<< Backend.l sha <- inRepo $ Git.HashObject.hashObject BlobObject link stageSymlink file sha - madeChange file LinkChange - - {- When a new link appears, after the startup scan, - - try to get the key's content. -} - checkcontent key daemonstatus - | scanComplete daemonstatus = unlessM (inAnnex key) $ - queueTransfers Next transferqueue dstatus - key (Just file) Download - | otherwise = noop + madeChange file LinkChange + + {- When a new link appears, or a link is changed, after the startup + - scan, handle getting or dropping the key's content. -} + checkcontent key daemonstatus + | scanComplete daemonstatus = do + present <- liftAnnex $ inAnnex key + unless present $ + queueTransfers Next key (Just file) Download + handleDrops present key (Just file) + | otherwise = noop onDel :: Handler -onDel threadname file _ _dstatus _ = do - liftIO $ debug threadname ["file deleted", file] - Annex.Queue.addUpdateIndex =<< - inRepo (Git.UpdateIndex.unstageFile file) +onDel file _ = do + debug ["file deleted", file] + liftAnnex $ + Annex.Queue.addUpdateIndex =<< + inRepo (Git.UpdateIndex.unstageFile file) madeChange file RmChange {- A directory has been deleted, or moved, so tell git to remove anything @@ -197,18 +203,18 @@ onDel threadname file _ _dstatus _ = do - command to get the recursive list of files in the directory, so rm is - just as good. -} onDelDir :: Handler -onDelDir threadname dir _ _dstatus _ = do - liftIO $ debug threadname ["directory deleted", dir] - Annex.Queue.addCommand "rm" +onDelDir dir _ = do + debug ["directory deleted", dir] + liftAnnex $ Annex.Queue.addCommand "rm" [Params "--quiet -r --cached --ignore-unmatch --"] [dir] madeChange dir RmDirChange {- Called when there's an error with inotify or kqueue. 
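 -
 - Ordinary handler failures, by contrast, are caught by runHandler above
 - so that one bad event cannot crash the whole watcher thread. In plain
 - IO the shape is roughly (hypothetical helper `safely`):
 -
 - > import Control.Exception (SomeException, try)
 - >
 - > -- Run a handler, printing any exception instead of propagating it,
 - > -- so a single bad event cannot kill the surrounding thread.
 - > safely :: IO () -> IO ()
 - > safely a = either print return =<< (try a :: IO (Either SomeException ()))
 -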
-} onErr :: Handler -onErr _ msg _ dstatus _ = do - warning msg - void $ liftIO $ addAlert dstatus $ warningAlert "watcher" msg - return Nothing +onErr msg _ = do + liftAnnex $ warning msg + void $ addAlert $ warningAlert "watcher" msg + noChange {- Adds a symlink to the index, without ever accessing the actual symlink - on disk. This avoids a race if git add is used, where the symlink is diff --git a/Assistant/Threads/WebApp.hs b/Assistant/Threads/WebApp.hs index 8b9db1e..be9a9a1 100644 --- a/Assistant/Threads/WebApp.hs +++ b/Assistant/Threads/WebApp.hs @@ -5,7 +5,7 @@ - Licensed under the GNU GPL version 3 or higher. -} -{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-} +{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes, CPP #-} {-# OPTIONS_GHC -fno-warn-orphans #-} module Assistant.Threads.WebApp where @@ -21,14 +21,13 @@ import Assistant.WebApp.Configurators.Edit import Assistant.WebApp.Configurators.Local import Assistant.WebApp.Configurators.Ssh import Assistant.WebApp.Configurators.Pairing +#ifdef WITH_S3 import Assistant.WebApp.Configurators.S3 +#endif +import Assistant.WebApp.Configurators.XMPP import Assistant.WebApp.Documentation import Assistant.WebApp.OtherRepos -import Assistant.ThreadedMonad -import Assistant.DaemonStatus -import Assistant.ScanRemotes -import Assistant.TransferQueue -import Assistant.TransferSlots +import Assistant.Types.ThreadedMonad import Utility.WebApp import Utility.FileMode import Utility.TempFile @@ -46,64 +45,59 @@ mkYesodDispatch "WebApp" $(parseRoutesFile "Assistant/WebApp/routes") type Url = String -webAppThread - :: Maybe ThreadState - -> DaemonStatusHandle - -> ScanRemoteMap - -> TransferQueue - -> TransferSlots +webAppThread + :: AssistantData -> UrlRenderer + -> Bool -> Maybe (IO String) -> Maybe (Url -> FilePath -> IO ()) -> NamedThread -webAppThread mst dstatus scanremotes transferqueue transferslots urlrenderer postfirstrun onstartup = thread $ do +webAppThread assistantdata urlrenderer noannex postfirstrun onstartup = thread $ liftIO $ do webapp <- WebApp - <$> pure mst - <*> pure dstatus - <*> pure scanremotes - <*> pure transferqueue - <*> pure transferslots + <$> pure assistantdata <*> (pack <$> genRandomToken) - <*> getreldir mst + <*> getreldir <*> pure $(embed "static") <*> newWebAppState <*> pure postfirstrun + <*> pure noannex setUrlRenderer urlrenderer $ yesodRender webapp (pack "") app <- toWaiAppPlain webapp app' <- ifM debugEnabled ( return $ httpDebugLogger app , return app ) - runWebApp app' $ \port -> case mst of - Nothing -> withTempFile "webapp.html" $ \tmpfile _ -> + runWebApp app' $ \port -> if noannex + then withTempFile "webapp.html" $ \tmpfile _ -> go port webapp tmpfile Nothing - Just st -> do + else do + let st = threadState assistantdata htmlshim <- runThreadState st $ fromRepo gitAnnexHtmlShim urlfile <- runThreadState st $ fromRepo gitAnnexUrlFile go port webapp htmlshim (Just urlfile) - where - thread = NamedThread thisThread - getreldir Nothing = return Nothing - getreldir (Just st) = Just <$> + where + thread = NamedThread thisThread + getreldir + | noannex = return Nothing + | otherwise = Just <$> (relHome =<< absPath - =<< runThreadState st (fromRepo repoPath)) - go port webapp htmlshim urlfile = do - debug thisThread ["running on port", show port] - let url = myUrl webapp port - maybe noop (`writeFile` url) urlfile - writeHtmlShim url htmlshim - maybe noop (\a -> a url htmlshim) 
onstartup + =<< runThreadState (threadState assistantdata) (fromRepo repoPath)) + go port webapp htmlshim urlfile = do + let url = myUrl webapp port + maybe noop (`writeFile` url) urlfile + writeHtmlShim url htmlshim + maybe noop (\a -> a url htmlshim) onstartup {- Creates a html shim file that's used to redirect into the webapp, - to avoid exposing the secretToken when launching the web browser. -} writeHtmlShim :: String -> FilePath -> IO () writeHtmlShim url file = viaTmp go file $ genHtmlShim url - where - go tmpfile content = do - h <- openFile tmpfile WriteMode - modifyFileMode tmpfile $ removeModes [groupReadMode, otherReadMode] - hPutStr h content - hClose h + where + go tmpfile content = do + h <- openFile tmpfile WriteMode + modifyFileMode tmpfile $ removeModes [groupReadMode, otherReadMode] + hPutStr h content + hClose h {- TODO: generate this static file using Yesod. -} genHtmlShim :: String -> String @@ -122,5 +116,5 @@ genHtmlShim url = unlines myUrl :: WebApp -> PortNumber -> Url myUrl webapp port = unpack $ yesodRender webapp urlbase HomeR [] - where - urlbase = pack $ "http://localhost:" ++ show port + where + urlbase = pack $ "http://localhost:" ++ show port diff --git a/Assistant/Threads/XMPPClient.hs b/Assistant/Threads/XMPPClient.hs new file mode 100644 index 0000000..df602df --- /dev/null +++ b/Assistant/Threads/XMPPClient.hs @@ -0,0 +1,257 @@ +{- git-annex XMPP client + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.Threads.XMPPClient where + +import Assistant.Common +import Assistant.XMPP +import Assistant.XMPP.Client +import Assistant.NetMessager +import Assistant.Types.NetMessager +import Assistant.Types.Buddies +import Assistant.XMPP.Buddies +import Assistant.Sync +import Assistant.DaemonStatus +import qualified Remote +import Utility.ThreadScheduler +import Assistant.WebApp (UrlRenderer, renderUrl) +import Assistant.WebApp.Types +import Assistant.Alert +import Assistant.Pairing +import Assistant.XMPP.Git +import Annex.UUID + +import Network.Protocol.XMPP +import Control.Concurrent +import qualified Data.Text as T +import qualified Data.Set as S +import qualified Data.Map as M +import qualified Git.Branch +import Data.Time.Clock + +xmppClientThread :: UrlRenderer -> NamedThread +xmppClientThread urlrenderer = NamedThread "XMPPClient" $ + restartableClient . xmppClient urlrenderer =<< getAssistant id + +{- Runs the client, handing restart events. -} +restartableClient :: IO () -> Assistant () +restartableClient a = forever $ do + tid <- liftIO $ forkIO a + waitNetMessagerRestart + liftIO $ killThread tid + +xmppClient :: UrlRenderer -> AssistantData -> IO () +xmppClient urlrenderer d = do + v <- liftAssistant $ liftAnnex getXMPPCreds + case v of + Nothing -> noop -- will be restarted once creds get configured + Just c -> retry (runclient c) =<< getCurrentTime + where + liftAssistant = runAssistant d + inAssistant = liftIO . liftAssistant + + {- When the client exits, it's restarted; + - if it keeps failing, back off to wait 5 minutes before + - trying it again. 
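 -
 - Stripped of the XMPP specifics, that restart policy looks roughly like
 - this in plain IO (hypothetical helper `retryWithBackoff`; the 300
 - seconds mirrors the figure used below):
 -
 - > import Control.Concurrent (threadDelay)
 - > import Data.Time.Clock (getCurrentTime, diffUTCTime)
 - >
 - > -- Re-run an action forever; if it died within 5 minutes of starting,
 - > -- sleep 5 minutes before the next attempt.
 - > retryWithBackoff :: IO a -> IO b
 - > retryWithBackoff act = go =<< getCurrentTime
 - >   where
 - >     go start = do
 - >         _ <- act
 - >         now <- getCurrentTime
 - >         if diffUTCTime now start > 300
 - >             then go now
 - >             else do
 - >                 threadDelay (300 * 1000000)  -- five minutes
 - >                 go =<< getCurrentTime
 -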
-} + retry client starttime = do + e <- client + now <- getCurrentTime + if diffUTCTime now starttime > 300 + then do + liftAssistant $ debug ["connection lost; reconnecting", show e] + retry client now + else do + liftAssistant $ debug ["connection failed; will retry", show e] + threadDelaySeconds (Seconds 300) + retry client =<< getCurrentTime + + runclient c = liftIO $ connectXMPP c $ \jid -> do + selfjid <- bindJID jid + putStanza gitAnnexSignature + + inAssistant $ debug ["connected", show selfjid] + {- The buddy list starts empty each time + - the client connects, so that stale info + - is not retained. -} + void $ inAssistant $ + updateBuddyList (const noBuddies) <<~ buddyList + + xmppThread $ receivenotifications selfjid + forever $ do + a <- inAssistant $ relayNetMessage selfjid + a + + receivenotifications selfjid = forever $ do + l <- decodeStanza selfjid <$> getStanza + -- inAssistant $ debug ["received:", show l] + mapM_ (handle selfjid) l + + handle _ (PresenceMessage p) = void $ inAssistant $ + updateBuddyList (updateBuddies p) <<~ buddyList + handle _ (GotNetMessage QueryPresence) = putStanza gitAnnexSignature + handle _ (GotNetMessage (NotifyPush us)) = void $ inAssistant $ pull us + handle selfjid (GotNetMessage (PairingNotification stage c u)) = + maybe noop (inAssistant . pairMsgReceived urlrenderer stage u selfjid) (parseJID c) + handle _ (GotNetMessage m@(Pushing _ pushstage)) + | isPushInitiation pushstage = inAssistant $ + unlessM (queueNetPushMessage m) $ + void $ forkIO <~> handlePushInitiation m + | otherwise = void $ inAssistant $ queueNetPushMessage m + handle _ (Ignorable _) = noop + handle _ (Unknown _) = noop + handle _ (ProtocolError _) = noop + + +data XMPPEvent + = GotNetMessage NetMessage + | PresenceMessage Presence + | Ignorable ReceivedStanza + | Unknown ReceivedStanza + | ProtocolError ReceivedStanza + deriving Show + +{- Decodes an XMPP stanza into one or more events. -} +decodeStanza :: JID -> ReceivedStanza -> [XMPPEvent] +decodeStanza selfjid s@(ReceivedPresence p) + | presenceType p == PresenceError = [ProtocolError s] + | presenceFrom p == Nothing = [Ignorable s] + | presenceFrom p == Just selfjid = [Ignorable s] + | otherwise = maybe [PresenceMessage p] decode (gitAnnexTagInfo p) + where + decode i + | tagAttr i == pushAttr = impliedp $ GotNetMessage $ NotifyPush $ + decodePushNotification (tagValue i) + | tagAttr i == queryAttr = impliedp $ GotNetMessage QueryPresence + | otherwise = [Unknown s] + {- Things sent via presence imply a presence message, + - along with their real meaning. -} + impliedp v = [PresenceMessage p, v] +decodeStanza selfjid s@(ReceivedMessage m) + | messageFrom m == Nothing = [Ignorable s] + | messageFrom m == Just selfjid = [Ignorable s] + | messageType m == MessageError = [ProtocolError s] + | otherwise = [fromMaybe (Unknown s) (GotNetMessage <$> decodeMessage m)] +decodeStanza _ s = [Unknown s] + +{- Waits for a NetMessager message to be sent, and relays it to XMPP. + - + - Chat messages must be directed to specific clients, not a base + - account JID, due to git-annex clients using a negative presence priority. + - PairingNotification messages are always directed at specific + - clients, but Pushing messages are sometimes not, and need to be exploded. 
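+ -
+ - ("Exploded" here means that a Pushing message addressed to a bare
+ - account JID is sent separately to each known assistant client of
+ - that buddy, as the Pushing case below does.)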
+ -} +relayNetMessage :: JID -> Assistant (XMPP ()) +relayNetMessage selfjid = convert =<< waitNetMessage + where + convert (NotifyPush us) = return $ putStanza $ pushNotification us + convert QueryPresence = return $ putStanza presenceQuery + convert (PairingNotification stage c u) = withclient c $ \tojid -> do + changeBuddyPairing tojid True + return $ putStanza $ pairingNotification stage u tojid selfjid + convert (Pushing c pushstage) = withclient c $ \tojid -> do + if tojid == baseJID tojid + then do + bud <- getBuddy (genBuddyKey tojid) <<~ buddyList + return $ forM_ (maybe [] (S.toList . buddyAssistants) bud) $ \(Client jid) -> + putStanza $ pushMessage pushstage jid selfjid + else return $ putStanza $ pushMessage pushstage tojid selfjid + + withclient c a = case parseJID c of + Nothing -> return noop + Just tojid + | tojid == selfjid -> return noop + | otherwise -> a tojid + +{- Runs a XMPP action in a separate thread, using a session to allow it + - to access the same XMPP client. -} +xmppThread :: XMPP () -> XMPP () +xmppThread a = do + s <- getSession + void $ liftIO $ forkIO $ + void $ runXMPP s a + +{- We only pull from one remote out of the set listed in the push + - notification, as an optimisation. + - + - Note that it might be possible (though very unlikely) for the push + - notification to take a while to be sent, and multiple pushes happen + - before it is sent, so it includes multiple remotes that were pushed + - to at different times. + - + - It could then be the case that the remote we choose had the earlier + - push sent to it, but then failed to get the later push, and so is not + - fully up-to-date. If that happens, the pushRetryThread will come along + - and retry the push, and we'll get another notification once it succeeds, + - and pull again. -} +pull :: [UUID] -> Assistant () +pull [] = noop +pull us = do + rs <- filter matching . syncGitRemotes <$> getDaemonStatus + debug $ "push notification for" : map (fromUUID . Remote.uuid ) rs + pullone rs =<< liftAnnex (inRepo Git.Branch.current) + where + matching r = Remote.uuid r `S.member` s + s = S.fromList us + + pullone [] _ = noop + pullone (r:rs) branch = + unlessM (all id . fst <$> manualPull branch [r]) $ + pullone rs branch + +pairMsgReceived :: UrlRenderer -> PairStage -> UUID -> JID -> JID -> Assistant () +pairMsgReceived urlrenderer PairReq theiruuid selfjid theirjid + | baseJID selfjid == baseJID theirjid = autoaccept + | otherwise = do + knownjids <- catMaybes . map (parseJID . getXMPPClientID) + . filter isXMPPRemote . syncRemotes <$> getDaemonStatus + if any (== baseJID theirjid) knownjids + then autoaccept + else showalert + + where + -- PairReq from another client using our JID, or the JID of + -- any repo we're already paired with is automatically accepted. + autoaccept = do + selfuuid <- liftAnnex getUUID + sendNetMessage $ + PairingNotification PairAck (formatJID theirjid) selfuuid + finishXMPPPairing theirjid theiruuid + -- Show an alert to let the user decide if they want to pair. + showalert = do + let route = ConfirmXMPPPairR (PairKey theiruuid $ formatJID theirjid) + url <- liftIO $ renderUrl urlrenderer route [] + close <- asIO1 removeAlert + void $ addAlert $ pairRequestReceivedAlert (T.unpack $ buddyName theirjid) + AlertButton + { buttonUrl = url + , buttonLabel = T.pack "Respond" + , buttonAction = Just close + } + +pairMsgReceived _ PairAck theiruuid _selfjid theirjid = + {- PairAck must come from one of the buddies we are pairing with; + - don't pair with just anyone. 
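+	 -
+	 - isBuddyPairing checks the buddyPairing flag, which relayNetMessage
+	 - sets when we send that buddy a pairing notification of our own.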
-} + whenM (isBuddyPairing theirjid) $ do + changeBuddyPairing theirjid False + selfuuid <- liftAnnex getUUID + sendNetMessage $ + PairingNotification PairDone (formatJID theirjid) selfuuid + finishXMPPPairing theirjid theiruuid + +pairMsgReceived _ PairDone _theiruuid _selfjid theirjid = + changeBuddyPairing theirjid False + +isBuddyPairing :: JID -> Assistant Bool +isBuddyPairing jid = maybe False buddyPairing <$> + getBuddy (genBuddyKey jid) <<~ buddyList + +changeBuddyPairing :: JID -> Bool -> Assistant () +changeBuddyPairing jid ispairing = + updateBuddyList (M.adjust set key) <<~ buddyList + where + key = genBuddyKey jid + set b = b { buddyPairing = ispairing } diff --git a/Assistant/TransferQueue.hs b/Assistant/TransferQueue.hs index 125b6d1..4d46b09 100644 --- a/Assistant/TransferQueue.hs +++ b/Assistant/TransferQueue.hs @@ -21,8 +21,9 @@ module Assistant.TransferQueue ( dequeueTransfers, ) where -import Common.Annex +import Assistant.Common import Assistant.DaemonStatus +import Assistant.Types.TransferQueue import Logs.Transfer import Types.Remote import qualified Remote @@ -32,24 +33,9 @@ import Annex.Wanted import Control.Concurrent.STM import qualified Data.Map as M -data TransferQueue = TransferQueue - { queuesize :: TVar Int - , queuelist :: TVar [(Transfer, TransferInfo)] - , deferreddownloads :: TVar [(Key, AssociatedFile)] - } - -data Schedule = Next | Later - deriving (Eq) - -newTransferQueue :: IO TransferQueue -newTransferQueue = atomically $ TransferQueue - <$> newTVar 0 - <*> newTVar [] - <*> newTVar [] - {- Reads the queue's content without blocking or changing it. -} -getTransferQueue :: TransferQueue -> IO [(Transfer, TransferInfo)] -getTransferQueue q = atomically $ readTVar $ queuelist q +getTransferQueue :: Assistant [(Transfer, TransferInfo)] +getTransferQueue = (atomically . readTVar . queuelist) <<~ transferQueue stubInfo :: AssociatedFile -> Remote -> TransferInfo stubInfo f r = stubTransferInfo @@ -59,101 +45,104 @@ stubInfo f r = stubTransferInfo {- Adds transfers to queue for some of the known remotes. - Honors preferred content settings, only transferring wanted files. -} -queueTransfers :: Schedule -> TransferQueue -> DaemonStatusHandle -> Key -> AssociatedFile -> Direction -> Annex () +queueTransfers :: Schedule -> Key -> AssociatedFile -> Direction -> Assistant () queueTransfers = queueTransfersMatching (const True) {- Adds transfers to queue for some of the known remotes, that match a - condition. Honors preferred content settings. -} -queueTransfersMatching :: (UUID -> Bool) -> Schedule -> TransferQueue -> DaemonStatusHandle -> Key -> AssociatedFile -> Direction -> Annex () -queueTransfersMatching matching schedule q dstatus k f direction - | direction == Download = whenM (wantGet f) go +queueTransfersMatching :: (UUID -> Bool) -> Schedule -> Key -> AssociatedFile -> Direction -> Assistant () +queueTransfersMatching matching schedule k f direction + | direction == Download = whenM (liftAnnex $ wantGet f) go | otherwise = go - where - go = do - rs <- sufficientremotes - =<< syncRemotes <$> liftIO (getDaemonStatus dstatus) - let matchingrs = filter (matching . Remote.uuid) rs - if null matchingrs - then defer - else forM_ matchingrs $ \r -> liftIO $ - enqueue schedule q dstatus (gentransfer r) (stubInfo f r) - sufficientremotes rs - {- Queue downloads from all remotes that - - have the key, with the cheapest ones first. - - More expensive ones will only be tried if - - downloading from a cheap one fails. 
-} - | direction == Download = do - uuids <- Remote.keyLocations k - return $ filter (\r -> uuid r `elem` uuids) rs - {- Upload to all remotes that want the content. -} - | otherwise = filterM (wantSend f . Remote.uuid) $ - filter (not . Remote.readonly) rs - gentransfer r = Transfer - { transferDirection = direction - , transferKey = k - , transferUUID = Remote.uuid r - } - defer - {- Defer this download, as no known remote has the key. -} - | direction == Download = void $ liftIO $ atomically $ - modifyTVar' (deferreddownloads q) $ - \l -> (k, f):l - | otherwise = noop + where + go = do + rs <- liftAnnex . sufficientremotes + =<< syncDataRemotes <$> getDaemonStatus + let matchingrs = filter (matching . Remote.uuid) rs + if null matchingrs + then defer + else forM_ matchingrs $ \r -> + enqueue schedule (gentransfer r) (stubInfo f r) + sufficientremotes rs + {- Queue downloads from all remotes that + - have the key, with the cheapest ones first. + - More expensive ones will only be tried if + - downloading from a cheap one fails. -} + | direction == Download = do + uuids <- Remote.keyLocations k + return $ filter (\r -> uuid r `elem` uuids) rs + {- Upload to all remotes that want the content. -} + | otherwise = filterM (wantSend f . Remote.uuid) $ + filter (not . Remote.readonly) rs + gentransfer r = Transfer + { transferDirection = direction + , transferKey = k + , transferUUID = Remote.uuid r + } + defer + {- Defer this download, as no known remote has the key. -} + | direction == Download = do + q <- getAssistant transferQueue + void $ liftIO $ atomically $ + modifyTVar' (deferreddownloads q) $ + \l -> (k, f):l + | otherwise = noop {- Queues any deferred downloads that can now be accomplished, leaving - any others in the list to try again later. 
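 - A download is deferred by queueTransfersMatching above when none of
 - the remotes currently being synced with is known to have the key.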
-} -queueDeferredDownloads :: Schedule -> TransferQueue -> DaemonStatusHandle -> Annex () -queueDeferredDownloads schedule q dstatus = do +queueDeferredDownloads :: Schedule -> Assistant () +queueDeferredDownloads schedule = do + q <- getAssistant transferQueue l <- liftIO $ atomically $ swapTVar (deferreddownloads q) [] - rs <- syncRemotes <$> liftIO (getDaemonStatus dstatus) + rs <- syncDataRemotes <$> getDaemonStatus left <- filterM (queue rs) l unless (null left) $ liftIO $ atomically $ modifyTVar' (deferreddownloads q) $ \new -> new ++ left - where - queue rs (k, f) = do - uuids <- Remote.keyLocations k - let sources = filter (\r -> uuid r `elem` uuids) rs - unless (null sources) $ - forM_ sources $ \r -> liftIO $ - enqueue schedule q dstatus - (gentransfer r) (stubInfo f r) - return $ null sources - where - gentransfer r = Transfer - { transferDirection = Download - , transferKey = k - , transferUUID = Remote.uuid r - } - -enqueue :: Schedule -> TransferQueue -> DaemonStatusHandle -> Transfer -> TransferInfo -> IO () -enqueue schedule q dstatus t info + where + queue rs (k, f) = do + uuids <- liftAnnex $ Remote.keyLocations k + let sources = filter (\r -> uuid r `elem` uuids) rs + unless (null sources) $ + forM_ sources $ \r -> + enqueue schedule (gentransfer r) (stubInfo f r) + return $ null sources + where + gentransfer r = Transfer + { transferDirection = Download + , transferKey = k + , transferUUID = Remote.uuid r + } + +enqueue :: Schedule -> Transfer -> TransferInfo -> Assistant () +enqueue schedule t info | schedule == Next = go (new:) | otherwise = go (\l -> l++[new]) - where - new = (t, info) - go modlist = do - atomically $ do - void $ modifyTVar' (queuesize q) succ - void $ modifyTVar' (queuelist q) modlist - void $ notifyTransfer dstatus + where + new = (t, info) + go modlist = do + q <- getAssistant transferQueue + liftIO $ atomically $ do + void $ modifyTVar' (queuesize q) succ + void $ modifyTVar' (queuelist q) modlist + notifyTransfer {- Adds a transfer to the queue. -} -queueTransfer :: Schedule -> TransferQueue -> DaemonStatusHandle -> AssociatedFile -> Transfer -> Remote -> IO () -queueTransfer schedule q dstatus f t remote = - enqueue schedule q dstatus t (stubInfo f remote) +queueTransfer :: Schedule -> AssociatedFile -> Transfer -> Remote -> Assistant () +queueTransfer schedule f t remote = enqueue schedule t (stubInfo f remote) {- Blocks until the queue is no larger than a given size, and then adds a - transfer to the queue. 
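 - The wait is an STM retry on queuesize, so the caller blocks until some
 - other thread shrinks the queue; queueTransferWhenSmall below uses this
 - with a limit of 10.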
-} -queueTransferAt :: Int -> Schedule -> TransferQueue -> DaemonStatusHandle -> AssociatedFile -> Transfer -> Remote -> IO () -queueTransferAt wantsz schedule q dstatus f t remote = do - atomically $ do +queueTransferAt :: Int -> Schedule -> AssociatedFile -> Transfer -> Remote -> Assistant () +queueTransferAt wantsz schedule f t remote = do + q <- getAssistant transferQueue + liftIO $ atomically $ do sz <- readTVar (queuesize q) unless (sz <= wantsz) $ retry -- blocks until queuesize changes - enqueue schedule q dstatus t (stubInfo f remote) + enqueue schedule t (stubInfo f remote) -queueTransferWhenSmall :: TransferQueue -> DaemonStatusHandle -> AssociatedFile -> Transfer -> Remote -> IO () +queueTransferWhenSmall :: AssociatedFile -> Transfer -> Remote -> Assistant () queueTransferWhenSmall = queueTransferAt 10 Later {- Blocks until a pending transfer is available in the queue, @@ -164,38 +153,45 @@ queueTransferWhenSmall = queueTransferAt 10 Later - - This is done in a single STM transaction, so there is no window - where an observer sees an inconsistent status. -} -getNextTransfer :: TransferQueue -> DaemonStatusHandle -> (TransferInfo -> Bool) -> IO (Maybe (Transfer, TransferInfo)) -getNextTransfer q dstatus acceptable = atomically $ do - sz <- readTVar (queuesize q) - if sz < 1 - then retry -- blocks until queuesize changes - else do - (r@(t,info):rest) <- readTVar (queuelist q) - writeTVar (queuelist q) rest - void $ modifyTVar' (queuesize q) pred - if acceptable info - then do - adjustTransfersSTM dstatus $ - M.insertWith' const t info - return $ Just r - else return Nothing +getNextTransfer :: (TransferInfo -> Bool) -> Assistant (Maybe (Transfer, TransferInfo)) +getNextTransfer acceptable = do + q <- getAssistant transferQueue + dstatus <- getAssistant daemonStatusHandle + liftIO $ atomically $ do + sz <- readTVar (queuesize q) + if sz < 1 + then retry -- blocks until queuesize changes + else do + (r@(t,info):rest) <- readTVar (queuelist q) + writeTVar (queuelist q) rest + void $ modifyTVar' (queuesize q) pred + if acceptable info + then do + adjustTransfersSTM dstatus $ + M.insertWith' const t info + return $ Just r + else return Nothing {- Moves transfers matching a condition from the queue, to the - currentTransfers map. -} -getMatchingTransfers :: TransferQueue -> DaemonStatusHandle -> (Transfer -> Bool) -> IO [(Transfer, TransferInfo)] -getMatchingTransfers q dstatus c = atomically $ do - ts <- dequeueTransfersSTM q c - unless (null ts) $ - adjustTransfersSTM dstatus $ \m -> M.union m $ M.fromList ts - return ts +getMatchingTransfers :: (Transfer -> Bool) -> Assistant [(Transfer, TransferInfo)] +getMatchingTransfers c = do + q <- getAssistant transferQueue + dstatus <- getAssistant daemonStatusHandle + liftIO $ atomically $ do + ts <- dequeueTransfersSTM q c + unless (null ts) $ + adjustTransfersSTM dstatus $ \m -> M.union m $ M.fromList ts + return ts {- Removes transfers matching a condition from the queue, and returns the - removed transfers. 
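 - (For example, dequeueTransfers (== tr) would remove any queued copies
 - of the single transfer tr.)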
-} -dequeueTransfers :: TransferQueue -> DaemonStatusHandle -> (Transfer -> Bool) -> IO [(Transfer, TransferInfo)] -dequeueTransfers q dstatus c = do - removed <- atomically $ dequeueTransfersSTM q c +dequeueTransfers :: (Transfer -> Bool) -> Assistant [(Transfer, TransferInfo)] +dequeueTransfers c = do + q <- getAssistant transferQueue + removed <- liftIO $ atomically $ dequeueTransfersSTM q c unless (null removed) $ - notifyTransfer dstatus + notifyTransfer return removed dequeueTransfersSTM :: TransferQueue -> (Transfer -> Bool) -> STM [(Transfer, TransferInfo)] diff --git a/Assistant/TransferSlots.hs b/Assistant/TransferSlots.hs index 9e9156a..7c9f747 100644 --- a/Assistant/TransferSlots.hs +++ b/Assistant/TransferSlots.hs @@ -5,56 +5,34 @@ - Licensed under the GNU GPL version 3 or higher. -} -{-# LANGUAGE DeriveDataTypeable #-} - module Assistant.TransferSlots where -import Common.Annex +import Assistant.Common import Utility.ThreadScheduler +import Assistant.Types.TransferSlots import Assistant.DaemonStatus import Logs.Transfer import qualified Control.Exception as E import Control.Concurrent -import Data.Typeable - -type TransferSlots = QSemN - -{- A special exception that can be thrown to pause or resume a transfer, while - - keeping its slot in use. -} -data TransferException = PauseTransfer | ResumeTransfer - deriving (Show, Eq, Typeable) +import qualified Control.Concurrent.MSemN as MSemN -instance E.Exception TransferException - -type TransferSlotRunner = DaemonStatusHandle -> TransferSlots -> TransferGenerator -> IO () -type TransferGenerator = IO (Maybe (Transfer, TransferInfo, IO ())) - -{- Number of concurrent transfers allowed to be run from the assistant. - - - - Transfers launched by other means, including by remote assistants, - - do not currently take up slots. - -} -numSlots :: Int -numSlots = 1 - -newTransferSlots :: IO TransferSlots -newTransferSlots = newQSemN numSlots +type TransferGenerator = Assistant (Maybe (Transfer, TransferInfo, Assistant ())) {- Waits until a transfer slot becomes available, then runs a - TransferGenerator, and then runs the transfer action in its own thread. -} -inTransferSlot :: TransferSlotRunner -inTransferSlot dstatus s gen = do - waitQSemN s 1 - runTransferThread dstatus s =<< gen +inTransferSlot :: TransferGenerator -> Assistant () +inTransferSlot gen = do + flip MSemN.wait 1 <<~ transferSlots + runTransferThread =<< gen {- Runs a TransferGenerator, and its transfer action, - without waiting for a slot to become available. -} -inImmediateTransferSlot :: TransferSlotRunner -inImmediateTransferSlot dstatus s gen = do - signalQSemN s (-1) - runTransferThread dstatus s =<< gen +inImmediateTransferSlot :: TransferGenerator -> Assistant () +inImmediateTransferSlot gen = do + flip MSemN.signal (-1) <<~ transferSlots + runTransferThread =<< gen {- Runs a transfer action, in an already allocated transfer slot. - Once it finishes, frees the transfer slot. @@ -66,24 +44,30 @@ inImmediateTransferSlot dstatus s gen = do - then pausing the thread until a ResumeTransfer exception is raised, - then rerunning the action. 
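 -
 - As a rough sketch (not part of this patch), given a transfer's
 - TransferInfo (call it info), another thread could pause or resume it
 - via the ThreadId recorded in transferTid below:
 -
 -   maybe noop (`E.throwTo` PauseTransfer) (transferTid info)
 -   maybe noop (`E.throwTo` ResumeTransfer) (transferTid info)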
-} -runTransferThread :: DaemonStatusHandle -> TransferSlots -> Maybe (Transfer, TransferInfo, IO ()) -> IO () -runTransferThread _ s Nothing = signalQSemN s 1 -runTransferThread dstatus s (Just (t, info, a)) = do - tid <- forkIO go - updateTransferInfo dstatus t $ info { transferTid = Just tid } - where - go = catchPauseResume a - pause = catchPauseResume $ runEvery (Seconds 86400) noop - {- Note: This must use E.try, rather than E.catch. - - When E.catch is used, and has called go in its exception - - handler, Control.Concurrent.throwTo will block sometimes - - when signaling. Using E.try avoids the problem. -} - catchPauseResume a' = do - r <- E.try a' :: IO (Either E.SomeException ()) - case r of - Left e -> case E.fromException e of - Just PauseTransfer -> pause - Just ResumeTransfer -> go - _ -> done +runTransferThread :: Maybe (Transfer, TransferInfo, Assistant ()) -> Assistant () +runTransferThread Nothing = flip MSemN.signal 1 <<~ transferSlots +runTransferThread (Just (t, info, a)) = do + d <- getAssistant id + aio <- asIO a + tid <- liftIO $ forkIO $ runTransferThread' d aio + updateTransferInfo t $ info { transferTid = Just tid } + +runTransferThread' :: AssistantData -> IO () -> IO () +runTransferThread' d a = go + where + go = catchPauseResume a + pause = catchPauseResume $ runEvery (Seconds 86400) noop + {- Note: This must use E.try, rather than E.catch. + - When E.catch is used, and has called go in its exception + - handler, Control.Concurrent.throwTo will block sometimes + - when signaling. Using E.try avoids the problem. -} + catchPauseResume a' = do + r <- E.try a' :: IO (Either E.SomeException ()) + case r of + Left e -> case E.fromException e of + Just PauseTransfer -> pause + Just ResumeTransfer -> go _ -> done - done = signalQSemN s 1 + _ -> done + done = runAssistant d $ + flip MSemN.signal 1 <<~ transferSlots diff --git a/Assistant/Types/BranchChange.hs b/Assistant/Types/BranchChange.hs new file mode 100644 index 0000000..399abee --- /dev/null +++ b/Assistant/Types/BranchChange.hs @@ -0,0 +1,19 @@ +{- git-annex assistant git-annex branch change tracking + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.Types.BranchChange where + +import Control.Concurrent.MSampleVar +import Common.Annex + +newtype BranchChangeHandle = BranchChangeHandle (MSampleVar ()) + +newBranchChangeHandle :: IO BranchChangeHandle +newBranchChangeHandle = BranchChangeHandle <$> newEmptySV + +fromBranchChangeHandle :: BranchChangeHandle -> MSampleVar () +fromBranchChangeHandle (BranchChangeHandle v) = v diff --git a/Assistant/Types/Buddies.hs b/Assistant/Types/Buddies.hs new file mode 100644 index 0000000..36d8a4f --- /dev/null +++ b/Assistant/Types/Buddies.hs @@ -0,0 +1,80 @@ +{- git-annex assistant buddies + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +{-# LANGUAGE CPP #-} + +module Assistant.Types.Buddies where + +import Common.Annex + +import qualified Data.Map as M +import Control.Concurrent.STM +import Utility.NotificationBroadcaster +import Data.Text as T + +{- For simplicity, dummy types are defined even when XMPP is disabled. 
-} +#ifdef WITH_XMPP +import Network.Protocol.XMPP +import Data.Set as S +import Data.Ord + +newtype Client = Client JID + deriving (Eq, Show) + +instance Ord Client where + compare = comparing show + +data Buddy = Buddy + { buddyPresent :: S.Set Client + , buddyAway :: S.Set Client + , buddyAssistants :: S.Set Client + , buddyPairing :: Bool + } +#else +data Buddy = Buddy +#endif + deriving (Eq, Show) + +data BuddyKey = BuddyKey T.Text + deriving (Eq, Ord, Show, Read) + +data PairKey = PairKey UUID T.Text + deriving (Eq, Ord, Show, Read) + +type Buddies = M.Map BuddyKey Buddy + +{- A list of buddies, and a way to notify when it changes. -} +type BuddyList = (TMVar Buddies, NotificationBroadcaster) + +noBuddies :: Buddies +noBuddies = M.empty + +newBuddyList :: IO BuddyList +newBuddyList = (,) + <$> atomically (newTMVar noBuddies) + <*> newNotificationBroadcaster + +getBuddyList :: BuddyList -> IO [Buddy] +getBuddyList (v, _) = M.elems <$> atomically (readTMVar v) + +getBuddy :: BuddyKey -> BuddyList -> IO (Maybe Buddy) +getBuddy k (v, _) = M.lookup k <$> atomically (readTMVar v) + +getBuddyBroadcaster :: BuddyList -> NotificationBroadcaster +getBuddyBroadcaster (_, h) = h + +{- Applies a function to modify the buddy list, and if it's changed, + - sends notifications to any listeners. -} +updateBuddyList :: (Buddies -> Buddies) -> BuddyList -> IO () +updateBuddyList a (v, caster) = do + changed <- atomically $ do + buds <- takeTMVar v + let buds' = a buds + putTMVar v buds' + return $ buds /= buds' + when changed $ + sendNotification caster diff --git a/Assistant/Types/Changes.hs b/Assistant/Types/Changes.hs new file mode 100644 index 0000000..887aa81 --- /dev/null +++ b/Assistant/Types/Changes.hs @@ -0,0 +1,54 @@ +{- git-annex assistant change tracking + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.Types.Changes where + +import Types.KeySource +import Utility.TSet + +import Data.Time.Clock + +data ChangeType = AddChange | LinkChange | RmChange | RmDirChange + deriving (Show, Eq) + +type ChangeChan = TSet Change + +data Change + = Change + { changeTime :: UTCTime + , changeFile :: FilePath + , changeType :: ChangeType + } + | PendingAddChange + { changeTime ::UTCTime + , changeFile :: FilePath + } + | InProcessAddChange + { changeTime ::UTCTime + , keySource :: KeySource + } + deriving (Show) + +newChangeChan :: IO ChangeChan +newChangeChan = newTSet + +isPendingAddChange :: Change -> Bool +isPendingAddChange (PendingAddChange {}) = True +isPendingAddChange _ = False + +isInProcessAddChange :: Change -> Bool +isInProcessAddChange (InProcessAddChange {}) = True +isInProcessAddChange _ = False + +finishedChange :: Change -> Change +finishedChange c@(InProcessAddChange { keySource = ks }) = Change + { changeTime = changeTime c + , changeFile = keyFilename ks + , changeType = AddChange + } +finishedChange c = c + diff --git a/Assistant/Types/Commits.hs b/Assistant/Types/Commits.hs new file mode 100644 index 0000000..bb17c57 --- /dev/null +++ b/Assistant/Types/Commits.hs @@ -0,0 +1,17 @@ +{- git-annex assistant commit tracking + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. 
+ -} + +module Assistant.Types.Commits where + +import Utility.TSet + +type CommitChan = TSet Commit + +data Commit = Commit + +newCommitChan :: IO CommitChan +newCommitChan = newTSet diff --git a/Assistant/Types/DaemonStatus.hs b/Assistant/Types/DaemonStatus.hs new file mode 100644 index 0000000..df0928d --- /dev/null +++ b/Assistant/Types/DaemonStatus.hs @@ -0,0 +1,72 @@ +{- git-annex assistant daemon status + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +{-# LANGUAGE RankNTypes, ImpredicativeTypes #-} + +module Assistant.Types.DaemonStatus where + +import Common.Annex +import Assistant.Alert +import Assistant.Pairing +import Utility.NotificationBroadcaster +import Logs.Transfer + +import Control.Concurrent.STM +import Data.Time.Clock.POSIX +import qualified Data.Map as M + +data DaemonStatus = DaemonStatus + -- False when the daemon is performing its startup scan + { scanComplete :: Bool + -- Time when a previous process of the daemon was running ok + , lastRunning :: Maybe POSIXTime + -- True when the sanity checker is running + , sanityCheckRunning :: Bool + -- Last time the sanity checker ran + , lastSanityCheck :: Maybe POSIXTime + -- Currently running file content transfers + , currentTransfers :: TransferMap + -- Messages to display to the user. + , alertMap :: AlertMap + , lastAlertId :: AlertId + -- Ordered list of all remotes that can be synced with + , syncRemotes :: [Remote] + -- Ordered list of remotes to sync git with + , syncGitRemotes :: [Remote] + -- Ordered list of remotes to sync data with + , syncDataRemotes :: [Remote] + -- Pairing request that is in progress. + , pairingInProgress :: Maybe PairingInProgress + -- Broadcasts notifications about all changes to the DaemonStatus + , changeNotifier :: NotificationBroadcaster + -- Broadcasts notifications when queued or current transfers change. + , transferNotifier :: NotificationBroadcaster + -- Broadcasts notifications when there's a change to the alerts + , alertNotifier :: NotificationBroadcaster + } + +type TransferMap = M.Map Transfer TransferInfo + +{- This TMVar is never left empty, so accessing it will never block. -} +type DaemonStatusHandle = TMVar DaemonStatus + +newDaemonStatus :: IO DaemonStatus +newDaemonStatus = DaemonStatus + <$> pure False + <*> pure Nothing + <*> pure False + <*> pure Nothing + <*> pure M.empty + <*> pure M.empty + <*> pure firstAlertId + <*> pure [] + <*> pure [] + <*> pure [] + <*> pure Nothing + <*> newNotificationBroadcaster + <*> newNotificationBroadcaster + <*> newNotificationBroadcaster diff --git a/Assistant/Types/NamedThread.hs b/Assistant/Types/NamedThread.hs new file mode 100644 index 0000000..569f787 --- /dev/null +++ b/Assistant/Types/NamedThread.hs @@ -0,0 +1,21 @@ +{- git-annex assistant named threads. + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. 
+ -} + +module Assistant.Types.NamedThread where + +import Common.Annex +import Assistant.Monad + +import System.Log.Logger + +type ThreadName = String +data NamedThread = NamedThread ThreadName (Assistant ()) + +debug :: [String] -> Assistant () +debug ws = do + name <- getAssistant threadName + liftIO $ debugM name $ unwords $ (name ++ ":") : ws diff --git a/Assistant/Types/NetMessager.hs b/Assistant/Types/NetMessager.hs new file mode 100644 index 0000000..c036d62 --- /dev/null +++ b/Assistant/Types/NetMessager.hs @@ -0,0 +1,101 @@ +{- git-annex assistant out of band network messager types + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.Types.NetMessager where + +import Common.Annex +import Assistant.Pairing + +import Data.Text (Text) +import Control.Concurrent.STM +import Control.Concurrent.MSampleVar +import Data.ByteString (ByteString) +import qualified Data.Set as S + +{- Messages that can be sent out of band by a network messager. -} +data NetMessage + -- indicate that pushes have been made to the repos with these uuids + = NotifyPush [UUID] + -- requests other clients to inform us of their presence + | QueryPresence + -- notification about a stage in the pairing process, + -- involving a client, and a UUID. + | PairingNotification PairStage ClientID UUID + -- used for git push over the network messager + | Pushing ClientID PushStage + deriving (Show, Eq, Ord) + +{- Something used to identify the client, or clients to send the message to. -} +type ClientID = Text + +data PushStage + -- indicates that we have data to push over the out of band network + = CanPush + -- request that a git push be sent over the out of band network + | PushRequest + -- indicates that a push is starting + | StartingPush + -- a chunk of output of git receive-pack + | ReceivePackOutput ByteString + -- a chuck of output of git send-pack + | SendPackOutput ByteString + -- sent when git receive-pack exits, with its exit code + | ReceivePackDone ExitCode + deriving (Show, Eq, Ord) + +{- Things that initiate either side of a push, but do not actually send data. 
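+ - (Currently CanPush, PushRequest and StartingPush.)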
-} +isPushInitiation :: PushStage -> Bool +isPushInitiation CanPush = True +isPushInitiation PushRequest = True +isPushInitiation StartingPush = True +isPushInitiation _ = False + +data PushSide = SendPack | ReceivePack + deriving (Eq, Ord) + +pushDestinationSide :: PushStage -> PushSide +pushDestinationSide CanPush = ReceivePack +pushDestinationSide PushRequest = SendPack +pushDestinationSide StartingPush = ReceivePack +pushDestinationSide (ReceivePackOutput _) = SendPack +pushDestinationSide (SendPackOutput _) = ReceivePack +pushDestinationSide (ReceivePackDone _) = SendPack + +type SideMap a = PushSide -> a + +mkSideMap :: STM a -> IO (SideMap a) +mkSideMap gen = do + (sp, rp) <- atomically $ (,) <$> gen <*> gen + return $ lookupside sp rp + where + lookupside sp _ SendPack = sp + lookupside _ rp ReceivePack = rp + +getSide :: PushSide -> SideMap a -> a +getSide side m = m side + +data NetMessager = NetMessager + -- outgoing messages + { netMessages :: TChan (NetMessage) + -- write to this to restart the net messager + , netMessagerRestart :: MSampleVar () + -- only one side of a push can be running at a time + , netMessagerPushRunning :: SideMap (TMVar (Maybe ClientID)) + -- incoming messages related to a running push + , netMessagesPush :: SideMap (TChan NetMessage) + -- incoming push messages, deferred to be processed later + , netMessagesPushDeferred :: SideMap (TMVar (S.Set NetMessage)) + } + +newNetMessager :: IO NetMessager +newNetMessager = NetMessager + <$> atomically newTChan + <*> newEmptySV + <*> mkSideMap (newTMVar Nothing) + <*> mkSideMap newTChan + <*> mkSideMap (newTMVar S.empty) + where diff --git a/Assistant/Types/Pushes.hs b/Assistant/Types/Pushes.hs new file mode 100644 index 0000000..99e0ee1 --- /dev/null +++ b/Assistant/Types/Pushes.hs @@ -0,0 +1,24 @@ +{- git-annex assistant push tracking + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.Types.Pushes where + +import Common.Annex + +import Control.Concurrent.STM +import Data.Time.Clock +import qualified Data.Map as M + +{- Track the most recent push failure for each remote. -} +type PushMap = M.Map Remote UTCTime +type FailedPushMap = TMVar PushMap + +{- The TMVar starts empty, and is left empty when there are no + - failed pushes. This way we can block until there are some failed pushes. + -} +newFailedPushMap :: IO FailedPushMap +newFailedPushMap = atomically newEmptyTMVar diff --git a/Assistant/Types/ScanRemotes.hs b/Assistant/Types/ScanRemotes.hs new file mode 100644 index 0000000..d2f0c58 --- /dev/null +++ b/Assistant/Types/ScanRemotes.hs @@ -0,0 +1,25 @@ +{- git-annex assistant remotes needing scanning + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.Types.ScanRemotes where + +import Common.Annex + +import Control.Concurrent.STM +import qualified Data.Map as M + +data ScanInfo = ScanInfo + { scanPriority :: Int + , fullScan :: Bool + } + +type ScanRemoteMap = TMVar (M.Map Remote ScanInfo) + +{- The TMVar starts empty, and is left empty when there are no remotes + - to scan. 
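+ - A consumer can therefore block until there is something to scan,
+ - e.g. with atomically . takeTMVar.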
-} +newScanRemoteMap :: IO ScanRemoteMap +newScanRemoteMap = atomically newEmptyTMVar diff --git a/Assistant/Types/ThreadedMonad.hs b/Assistant/Types/ThreadedMonad.hs new file mode 100644 index 0000000..1a2aa7e --- /dev/null +++ b/Assistant/Types/ThreadedMonad.hs @@ -0,0 +1,38 @@ +{- making the Annex monad available across threads + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.Types.ThreadedMonad where + +import Common.Annex +import qualified Annex + +import Control.Concurrent +import Data.Tuple + +{- The Annex state is stored in a MVar, so that threaded actions can access + - it. -} +type ThreadState = MVar Annex.AnnexState + +{- Stores the Annex state in a MVar. + - + - Once the action is finished, retrieves the state from the MVar. + -} +withThreadState :: (ThreadState -> Annex a) -> Annex a +withThreadState a = do + state <- Annex.getState id + mvar <- liftIO $ newMVar state + r <- a mvar + newstate <- liftIO $ takeMVar mvar + Annex.changeState (const newstate) + return r + +{- Runs an Annex action, using the state from the MVar. + - + - This serializes calls by threads; only one thread can run in Annex at a + - time. -} +runThreadState :: ThreadState -> Annex a -> IO a +runThreadState mvar a = modifyMVar mvar $ \state -> swap <$> Annex.run state a diff --git a/Assistant/Types/TransferQueue.hs b/Assistant/Types/TransferQueue.hs new file mode 100644 index 0000000..6620ebd --- /dev/null +++ b/Assistant/Types/TransferQueue.hs @@ -0,0 +1,29 @@ +{- git-annex assistant pending transfer queue + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.Types.TransferQueue where + +import Common.Annex +import Logs.Transfer +import Types.Remote + +import Control.Concurrent.STM + +data TransferQueue = TransferQueue + { queuesize :: TVar Int + , queuelist :: TVar [(Transfer, TransferInfo)] + , deferreddownloads :: TVar [(Key, AssociatedFile)] + } + +data Schedule = Next | Later + deriving (Eq) + +newTransferQueue :: IO TransferQueue +newTransferQueue = atomically $ TransferQueue + <$> newTVar 0 + <*> newTVar [] + <*> newTVar [] diff --git a/Assistant/Types/TransferSlots.hs b/Assistant/Types/TransferSlots.hs new file mode 100644 index 0000000..5140995 --- /dev/null +++ b/Assistant/Types/TransferSlots.hs @@ -0,0 +1,34 @@ +{- git-annex assistant transfer slots + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +{-# LANGUAGE DeriveDataTypeable #-} + +module Assistant.Types.TransferSlots where + +import qualified Control.Exception as E +import qualified Control.Concurrent.MSemN as MSemN +import Data.Typeable + +type TransferSlots = MSemN.MSemN Int + +{- A special exception that can be thrown to pause or resume a transfer, while + - keeping its slot in use. -} +data TransferException = PauseTransfer | ResumeTransfer + deriving (Show, Eq, Typeable) + +instance E.Exception TransferException + +{- Number of concurrent transfers allowed to be run from the assistant. + - + - Transfers launched by other means, including by remote assistants, + - do not currently take up slots. 
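+ -
+ - The slots are a quantity semaphore (MSemN, see newTransferSlots below);
+ - inImmediateTransferSlot takes a slot without waiting by signalling -1,
+ - which can temporarily drive the available count negative.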
+ -} +numSlots :: Int +numSlots = 1 + +newTransferSlots :: IO TransferSlots +newTransferSlots = MSemN.new numSlots diff --git a/Assistant/WebApp.hs b/Assistant/WebApp.hs index 0b59ccc..325f27f 100644 --- a/Assistant/WebApp.hs +++ b/Assistant/WebApp.hs @@ -11,8 +11,6 @@ module Assistant.WebApp where import Assistant.WebApp.Types import Assistant.Common -import Assistant.ThreadedMonad -import Assistant.DaemonStatus import Utility.NotificationBroadcaster import Utility.Yesod import Locations.UserConfig @@ -62,8 +60,8 @@ bootstrap navbaritem content = do addScript $ StaticR js_bootstrap_modal_js $(widgetFile "page") hamletToRepHtml $(hamletFile $ hamletTemplate "bootstrap") - where - navdetails i = (navBarName i, navBarRoute i, Just i == navbaritem) + where + navdetails i = (navBarName i, navBarRoute i, Just i == navbaritem) newWebAppState :: IO (TMVar WebAppState) newWebAppState = do @@ -72,15 +70,18 @@ newWebAppState = do { showIntro = True , otherRepos = otherrepos } +liftAssistant :: forall sub a. (Assistant a) -> GHandler sub WebApp a +liftAssistant a = liftIO . flip runAssistant a =<< assistantData <$> getYesod + getWebAppState :: forall sub. GHandler sub WebApp WebAppState getWebAppState = liftIO . atomically . readTMVar =<< webAppState <$> getYesod modifyWebAppState :: forall sub. (WebAppState -> WebAppState) -> GHandler sub WebApp () modifyWebAppState a = go =<< webAppState <$> getYesod - where - go s = liftIO $ atomically $ do - v <- takeTMVar s - putTMVar s $ a v + where + go s = liftIO $ atomically $ do + v <- takeTMVar s + putTMVar s $ a v {- Runs an Annex action from the webapp. - @@ -88,24 +89,20 @@ modifyWebAppState a = go =<< webAppState <$> getYesod - value is returned. -} runAnnex :: forall sub a. a -> Annex a -> GHandler sub WebApp a -runAnnex fallback a = maybe (return fallback) go =<< threadState <$> getYesod - where - go st = liftIO $ runThreadState st a - -waitNotifier :: forall sub. (DaemonStatus -> NotificationBroadcaster) -> NotificationId -> GHandler sub WebApp () -waitNotifier selector nid = do - notifier <- getNotifier selector - liftIO $ waitNotification $ notificationHandleFromId notifier nid - -newNotifier :: forall sub. (DaemonStatus -> NotificationBroadcaster) -> GHandler sub WebApp NotificationId -newNotifier selector = do - notifier <- getNotifier selector - liftIO $ notificationHandleToId <$> newNotificationHandle notifier - -getNotifier :: forall sub. (DaemonStatus -> NotificationBroadcaster) -> GHandler sub WebApp NotificationBroadcaster -getNotifier selector = do - webapp <- getYesod - liftIO $ selector <$> getDaemonStatus (daemonStatus webapp) +runAnnex fallback a = ifM (noAnnex <$> getYesod) + ( return fallback + , liftAssistant $ liftAnnex a + ) + +waitNotifier :: forall sub. (Assistant NotificationBroadcaster) -> NotificationId -> GHandler sub WebApp () +waitNotifier getbroadcaster nid = liftAssistant $ do + b <- getbroadcaster + liftIO $ waitNotification $ notificationHandleFromId b nid + +newNotifier :: forall sub. (Assistant NotificationBroadcaster) -> GHandler sub WebApp NotificationId +newNotifier getbroadcaster = liftAssistant $ do + b <- getbroadcaster + liftIO $ notificationHandleToId <$> newNotificationHandle b {- Adds the auth parameter as a hidden field on a form. Must be put into - every form. 
-} diff --git a/Assistant/WebApp/Configurators.hs b/Assistant/WebApp/Configurators.hs index a1b22d7..89ce503 100644 --- a/Assistant/WebApp/Configurators.hs +++ b/Assistant/WebApp/Configurators.hs @@ -5,17 +5,17 @@ - Licensed under the GNU AGPL version 3 or higher. -} -{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes #-} +{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings, RankNTypes, CPP #-} module Assistant.WebApp.Configurators where import Assistant.Common +import Assistant.DaemonStatus import Assistant.WebApp import Assistant.WebApp.Types import Assistant.WebApp.SideBar import Assistant.WebApp.Utility import Assistant.WebApp.Configurators.Local -import Assistant.DaemonStatus import Utility.Yesod import qualified Remote import qualified Types.Remote as Remote @@ -23,6 +23,10 @@ import Annex.UUID (getUUID) import Logs.Remote import Logs.Trust import Config +import qualified Git +#ifdef WITH_XMPP +import Assistant.XMPP.Client +#endif import Yesod import Data.Text (Text) @@ -33,6 +37,11 @@ getConfigR :: Handler RepHtml getConfigR = ifM (inFirstRun) ( getFirstRepositoryR , bootstrap (Just Config) $ do +#ifdef WITH_XMPP + xmppconfigured <- lift $ runAnnex False $ isJust <$> getXMPPCreds +#else + let xmppconfigured = False +#endif sideBarDisplay setTitle "Configuration" $(widgetFile "configurators/main") @@ -42,21 +51,24 @@ getConfigR = ifM (inFirstRun) introDisplay :: Text -> Widget introDisplay ident = do webapp <- lift getYesod - repolist <- lift $ repoList True False + repolist <- lift $ repoList False True False let n = length repolist let numrepos = show n - let notenough = n < enough $(widgetFile "configurators/intro") lift $ modifyWebAppState $ \s -> s { showIntro = False } - where - enough = 2 + +makeMiscRepositories :: Widget +makeMiscRepositories = $(widgetFile "configurators/repositories/misc") + +makeCloudRepositories :: Widget +makeCloudRepositories = $(widgetFile "configurators/repositories/cloud") {- Lists known repositories, followed by options to add more. -} getRepositoriesR :: Handler RepHtml getRepositoriesR = bootstrap (Just Config) $ do sideBarDisplay setTitle "Repositories" - repolist <- lift $ repoList False True + repolist <- lift $ repoList False False True $(widgetFile "configurators/repositories") data Actions @@ -91,44 +103,61 @@ notSyncing :: Actions -> Bool notSyncing (SyncingRepoActions _ _) = False notSyncing _ = True +repoTable :: RepoList -> Widget +repoTable repolist = $(widgetFile "configurators/repositories/table") + +type RepoList = [(String, String, Actions)] + {- A numbered list of known repositories, - with actions that can be taken on them. -} -repoList :: Bool -> Bool -> Handler [(String, String, Actions)] -repoList onlyconfigured includehere +repoList :: Bool -> Bool -> Bool -> Handler RepoList +repoList onlycloud onlyconfigured includehere | onlyconfigured = list =<< configured | otherwise = list =<< (++) <$> configured <*> rest - where - configured = do - rs <- filter (not . Remote.readonly) . syncRemotes <$> - (liftIO . getDaemonStatus =<< daemonStatus <$> getYesod) - runAnnex [] $ do - u <- getUUID - let l = map Remote.uuid rs - let l' = if includehere then u : l else l - return $ zip l' $ map mkSyncingRepoActions l' - rest = runAnnex [] $ do - m <- readRemoteLog - unconfigured <- catMaybes . map (findtype m) . 
snd - <$> (trustPartition DeadTrusted $ M.keys m) - unsyncable <- map Remote.uuid <$> - (filterM (\r -> not <$> repoSyncable (Remote.repo r)) - =<< Remote.enabledRemoteList) - return $ zip unsyncable (map mkNotSyncingRepoActions unsyncable) ++ unconfigured - findtype m u = case M.lookup u m of - Nothing -> Nothing - Just c -> case M.lookup "type" c of - Just "rsync" -> u `enableswith` EnableRsyncR - Just "directory" -> u `enableswith` EnableDirectoryR - Just "S3" -> u `enableswith` EnableS3R - _ -> Nothing - u `enableswith` r = Just (u, DisabledRepoActions $ r u) - list l = runAnnex [] $ do - let l' = nubBy (\x y -> fst x == fst y) l - zip3 - <$> pure counter - <*> Remote.prettyListUUIDs (map fst l') - <*> pure (map snd l') - counter = map show ([1..] :: [Int]) + where + configured = do + rs <- filter wantedrepo . syncRemotes + <$> liftAssistant getDaemonStatus + runAnnex [] $ do + u <- getUUID + let l = map Remote.uuid rs + let l' = if includehere then u : l else l + return $ zip l' $ map mkSyncingRepoActions l' + rest = runAnnex [] $ do + m <- readRemoteLog + unconfigured <- map snd . catMaybes . filter wantedremote + . map (findinfo m) + <$> (trustExclude DeadTrusted $ M.keys m) + unsyncable <- map Remote.uuid . filter wantedrepo <$> + (filterM (\r -> not <$> repoSyncable (Remote.repo r)) + =<< Remote.enabledRemoteList) + return $ zip unsyncable (map mkNotSyncingRepoActions unsyncable) ++ unconfigured + wantedrepo r + | Remote.readonly r = False + | onlycloud = Git.repoIsUrl (Remote.repo r) && not (isXMPPRemote r) + | otherwise = True + wantedremote Nothing = False + wantedremote (Just (iscloud, _)) + | onlycloud = iscloud + | otherwise = True + findinfo m u = case M.lookup u m of + Nothing -> Nothing + Just c -> case M.lookup "type" c of + Just "rsync" -> val True EnableRsyncR + Just "directory" -> val False EnableDirectoryR +#ifdef WITH_S3 + Just "S3" -> val True EnableS3R +#endif + _ -> Nothing + where + val iscloud r = Just (iscloud, (u, DisabledRepoActions $ r u)) + list l = runAnnex [] $ do + let l' = nubBy (\x y -> fst x == fst y) l + zip3 + <$> pure counter + <*> Remote.prettyListUUIDs (map fst l') + <*> pure (map snd l') + counter = map show ([1..] :: [Int]) getEnableSyncR :: UUID -> Handler () getEnableSyncR = flipSync True diff --git a/Assistant/WebApp/Configurators/Edit.hs b/Assistant/WebApp/Configurators/Edit.hs index bb48737..e732123 100644 --- a/Assistant/WebApp/Configurators/Edit.hs +++ b/Assistant/WebApp/Configurators/Edit.hs @@ -16,6 +16,7 @@ import Assistant.WebApp.SideBar import Assistant.WebApp.Utility import Assistant.DaemonStatus import Assistant.MakeRemote (uniqueRemoteName) +import Assistant.WebApp.Configurators.XMPP (xmppNeeded) import Utility.Yesod import qualified Remote import qualified Remote.List as Remote @@ -26,6 +27,7 @@ import Types.StandardGroups import qualified Config import qualified Git import qualified Git.Command +import qualified Git.Config import Yesod import Data.Text (Text) @@ -50,17 +52,18 @@ getRepoConfig uuid r mremote = RepoConfig <*> (maybe Nothing (Just . T.pack) . 
M.lookup uuid <$> uuidMap) <*> getrepogroup <*> Config.repoSyncable r - where - getrepogroup = do - groups <- lookupGroups uuid - return $ - maybe (RepoGroupCustom $ unwords $ S.toList groups) RepoGroupStandard - (getStandardGroup groups) + where + getrepogroup = do + groups <- lookupGroups uuid + return $ + maybe (RepoGroupCustom $ unwords $ S.toList groups) RepoGroupStandard + (getStandardGroup groups) setRepoConfig :: UUID -> Maybe Remote -> RepoConfig -> RepoConfig -> Handler () setRepoConfig uuid mremote oldc newc = do - when (repoDescription oldc /= repoDescription newc) $ runAnnex undefined $ + when (repoDescription oldc /= repoDescription newc) $ runAnnex undefined $ do maybe noop (describeUUID uuid . T.unpack) (repoDescription newc) + void uuidMapLoad when (repoGroup oldc /= repoGroup newc) $ runAnnex undefined $ case repoGroup newc of RepoGroupStandard g -> setStandardGroup uuid g @@ -68,16 +71,25 @@ setRepoConfig uuid mremote oldc newc = do when (repoSyncable oldc /= repoSyncable newc) $ changeSyncable mremote (repoSyncable newc) when (isJust mremote && repoName oldc /= repoName newc) $ do - dstatus <- daemonStatus <$> getYesod runAnnex undefined $ do name <- fromRepo $ uniqueRemoteName (T.unpack $ repoName newc) 0 + {- git remote rename expects there to be a + - remote..fetch, and exits nonzero if + - there's not. Special remotes don't normally + - have that, and don't use it. Temporarily add + - it if it's missing. -} + let remotefetch = "remote." ++ T.unpack (repoName oldc) ++ ".fetch" + needfetch <- isNothing <$> fromRepo (Git.Config.getMaybe remotefetch) + when needfetch $ + inRepo $ Git.Command.run "config" + [Param remotefetch, Param ""] inRepo $ Git.Command.run "remote" [ Param "rename" , Param $ T.unpack $ repoName oldc , Param name ] void $ Remote.remoteListRefresh - updateSyncRemotes dstatus + liftAssistant updateSyncRemotes editRepositoryAForm :: RepoConfig -> AForm WebApp WebApp RepoConfig editRepositoryAForm def = RepoConfig @@ -85,14 +97,14 @@ editRepositoryAForm def = RepoConfig <*> aopt textField "Description" (Just $ repoDescription def) <*> areq (selectFieldList $ customgroups++standardgroups) "Repository group" (Just $ repoGroup def) <*> areq checkBoxField "Syncing enabled" (Just $ repoSyncable def) - where - standardgroups :: [(Text, RepoGroup)] - standardgroups = map (\g -> (T.pack $ descStandardGroup g , RepoGroupStandard g)) - [minBound :: StandardGroup .. maxBound :: StandardGroup] - customgroups :: [(Text, RepoGroup)] - customgroups = case repoGroup def of - RepoGroupCustom s -> [(T.pack s, RepoGroupCustom s)] - _ -> [] + where + standardgroups :: [(Text, RepoGroup)] + standardgroups = map (\g -> (T.pack $ descStandardGroup g , RepoGroupStandard g)) + [minBound :: StandardGroup .. 
maxBound :: StandardGroup] + customgroups :: [(Text, RepoGroup)] + customgroups = case repoGroup def of + RepoGroupCustom s -> [(T.pack s, RepoGroupCustom s)] + _ -> [] getEditRepositoryR :: UUID -> Handler RepHtml getEditRepositoryR = editForm False @@ -100,6 +112,9 @@ getEditRepositoryR = editForm False getEditNewRepositoryR :: UUID -> Handler RepHtml getEditNewRepositoryR = editForm True +getEditNewCloudRepositoryR :: UUID -> Handler RepHtml +getEditNewCloudRepositoryR uuid = xmppNeeded >> editForm True uuid + editForm :: Bool -> UUID -> Handler RepHtml editForm new uuid = bootstrap (Just Config) $ do sideBarDisplay @@ -114,8 +129,8 @@ editForm new uuid = bootstrap (Just Config) $ do setRepoConfig uuid mremote curr input redirect RepositoriesR _ -> showform form enctype curr - where - showform form enctype curr = do - let istransfer = repoGroup curr == RepoGroupStandard TransferGroup - let authtoken = webAppFormAuthToken - $(widgetFile "configurators/editrepository") + where + showform form enctype curr = do + let istransfer = repoGroup curr == RepoGroupStandard TransferGroup + let authtoken = webAppFormAuthToken + $(widgetFile "configurators/editrepository") diff --git a/Assistant/WebApp/Configurators/Local.hs b/Assistant/WebApp/Configurators/Local.hs index d02cecf..f146504 100644 --- a/Assistant/WebApp/Configurators/Local.hs +++ b/Assistant/WebApp/Configurators/Local.hs @@ -20,6 +20,7 @@ import Init import qualified Git import qualified Git.Construct import qualified Git.Config +import qualified Git.Command import qualified Annex import Locations.UserConfig import Utility.FreeDesktop @@ -31,6 +32,7 @@ import Remote (prettyListUUIDs) import Annex.UUID import Types.StandardGroups import Logs.PreferredContent +import Utility.UserInfo import Yesod import Data.Text (Text) @@ -48,17 +50,17 @@ data RepositoryPath = RepositoryPath Text - to use as a repository. -} repositoryPathField :: forall sub. Bool -> Field sub WebApp Text repositoryPathField autofocus = Field { fieldParse = parse, fieldView = view } - where - view idAttr nameAttr attrs val isReq = - [whamlet||] + where + view idAttr nameAttr attrs val isReq = + [whamlet||] - parse [path] - | T.null path = nopath - | otherwise = liftIO $ checkRepositoryPath path - parse [] = return $ Right Nothing - parse _ = nopath + parse [path] + | T.null path = nopath + | otherwise = liftIO $ checkRepositoryPath path + parse [] = return $ Right Nothing + parse _ = nopath - nopath = return $ Left "Enter a location for the repository" + nopath = return $ Left "Enter a location for the repository" {- As well as checking the path for a lot of silly things, tilde is - expanded in the returned path. -} @@ -81,14 +83,10 @@ checkRepositoryPath p = do case headMaybe problems of Nothing -> Right $ Just $ T.pack basepath Just prob -> Left prob - where - runcheck (chk, msg) = ifM (chk) - ( return $ Just msg - , return Nothing - ) - expandTilde home ('~':'/':path) = home path - expandTilde _ path = path - + where + runcheck (chk, msg) = ifM (chk) ( return $ Just msg, return Nothing ) + expandTilde home ('~':'/':path) = home path + expandTilde _ path = path {- On first run, if run in the home directory, default to putting it in - ~/Desktop/annex, when a Desktop directory exists, and ~/annex otherwise. 
@@ -102,13 +100,13 @@ defaultRepositoryPath firstrun = do if home == cwd && firstrun then inhome else ifM (canWrite cwd) ( return cwd, inhome ) - where - inhome = do - desktop <- userDesktopDir - ifM (doesDirectoryExist desktop) - ( relHome $ desktop gitAnnexAssistantDefaultDir - , return $ "~" gitAnnexAssistantDefaultDir - ) + where + inhome = do + desktop <- userDesktopDir + ifM (doesDirectoryExist desktop) + ( relHome $ desktop gitAnnexAssistantDefaultDir + , return $ "~" gitAnnexAssistantDefaultDir + ) newRepositoryForm :: FilePath -> Form RepositoryPath newRepositoryForm defpath msg = do @@ -162,17 +160,17 @@ selectDriveForm :: [RemovableDrive] -> Maybe RemovableDrive -> Form RemovableDri selectDriveForm drives def = renderBootstrap $ RemovableDrive <$> pure Nothing <*> areq (selectFieldList pairs) "Select drive:" (mountPoint <$> def) - where - pairs = zip (map describe drives) (map mountPoint drives) - describe drive = case diskFree drive of - Nothing -> mountPoint drive - Just free -> - let sz = roughSize storageUnits True free - in T.unwords - [ mountPoint drive - , T.concat ["(", T.pack sz] - , "free)" - ] + where + pairs = zip (map describe drives) (map mountPoint drives) + describe drive = case diskFree drive of + Nothing -> mountPoint drive + Just free -> + let sz = roughSize storageUnits True free + in T.unwords + [ mountPoint drive + , T.concat ["(", T.pack sz] + , "free)" + ] {- Adding a removable drive. -} getAddDriveR :: Handler RepHtml @@ -190,33 +188,32 @@ getAddDriveR = bootstrap (Just Config) $ do _ -> do let authtoken = webAppFormAuthToken $(widgetFile "configurators/adddrive") - where - make mountpoint = do - liftIO $ makerepo dir - u <- liftIO $ initRepo dir $ Just remotename - r <- addremote dir remotename - runAnnex () $ setStandardGroup u TransferGroup - syncRemote r - return u - where - dir = mountpoint gitAnnexAssistantDefaultDir - remotename = takeFileName mountpoint - {- The repo may already exist, when adding removable media - - that has already been used elsewhere. -} - makerepo dir = liftIO $ do - r <- E.try (inDir dir $ return True) :: IO (Either E.SomeException Bool) - case r of - Right _ -> noop - Left _e -> do - createDirectoryIfMissing True dir - makeRepo dir True - {- Each repository is made a remote of the other. -} - addremote dir name = runAnnex undefined $ do - hostname <- maybe "host" id <$> liftIO getHostname - hostlocation <- fromRepo Git.repoLocation - liftIO $ inDir dir $ - void $ makeGitRemote hostname hostlocation - addRemote $ makeGitRemote name dir + where + make mountpoint = do + liftIO $ makerepo dir + u <- liftIO $ initRepo dir $ Just remotename + r <- addremote dir remotename + runAnnex () $ setStandardGroup u TransferGroup + syncRemote r + return u + where + dir = mountpoint gitAnnexAssistantDefaultDir + remotename = takeFileName mountpoint + {- The repo may already exist, when adding removable media + - that has already been used elsewhere. -} + makerepo dir = liftIO $ do + r <- E.try (inDir dir $ return True) :: IO (Either E.SomeException Bool) + case r of + Right _ -> noop + Left _e -> do + createDirectoryIfMissing True dir + makeRepo dir True + {- Each repository is made a remote of the other. 
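+		 - The new repo on the drive gets a remote (named after this host)
+		 - pointing back at this repo, and this repo gets a remote named
+		 - after the mount point, pointing at the drive.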
-} + addremote dir name = runAnnex undefined $ do + hostname <- maybe "host" id <$> liftIO getHostname + hostlocation <- fromRepo Git.repoLocation + liftIO $ inDir dir $ void $ makeGitRemote hostname hostlocation + addRemote $ makeGitRemote name dir getEnableDirectoryR :: UUID -> Handler RepHtml getEnableDirectoryR uuid = bootstrap (Just Config) $ do @@ -229,23 +226,23 @@ getEnableDirectoryR uuid = bootstrap (Just Config) $ do {- List of removable drives. -} driveList :: IO [RemovableDrive] driveList = mapM (gen . mnt_dir) =<< filter sane <$> getMounts - where - gen dir = RemovableDrive - <$> getDiskFree dir - <*> pure (T.pack dir) - -- filter out some things that are surely not removable drives - sane Mntent { mnt_dir = dir, mnt_fsname = dev } - {- We want real disks like /dev/foo, not - - dummy mount points like proc or tmpfs or - - gvfs-fuse-daemon. -} - | not ('/' `elem` dev) = False - {- Just in case: These mount points are surely not - - removable disks. -} - | dir == "/" = False - | dir == "/tmp" = False - | dir == "/run/shm" = False - | dir == "/run/lock" = False - | otherwise = True + where + gen dir = RemovableDrive + <$> getDiskFree dir + <*> pure (T.pack dir) + -- filter out some things that are surely not removable drives + sane Mntent { mnt_dir = dir, mnt_fsname = dev } + {- We want real disks like /dev/foo, not + - dummy mount points like proc or tmpfs or + - gvfs-fuse-daemon. -} + | not ('/' `elem` dev) = False + {- Just in case: These mount points are surely not + - removable disks. -} + | dir == "/" = False + | dir == "/tmp" = False + | dir == "/run/shm" = False + | dir == "/run/lock" = False + | otherwise = True {- Bootstraps from first run mode to a fully running assistant in a - repository, by running the postFirstRun callback, which returns the @@ -268,11 +265,11 @@ makeRepo :: FilePath -> Bool -> IO () makeRepo path bare = do unlessM (boolSystem "git" params) $ error "git init failed!" - where - baseparams = [Param "init", Param "--quiet"] - params - | bare = baseparams ++ [Param "--bare", File path] - | otherwise = baseparams ++ [File path] + where + baseparams = [Param "init", Param "--quiet"] + params + | bare = baseparams ++ [Param "--bare", File path] + | otherwise = baseparams ++ [File path] {- Runs an action in the git-annex repository in the specified directory. -} inDir :: FilePath -> Annex a -> IO a @@ -280,11 +277,20 @@ inDir dir a = do state <- Annex.new =<< Git.Config.read =<< Git.Construct.fromPath dir Annex.eval state a -{- Initializes a git-annex repository in a directory with a description. -} initRepo :: FilePath -> Maybe String -> IO UUID initRepo dir desc = inDir dir $ do + {- Initialize a git-annex repository in a directory with a description. -} unlessM isInitialized $ initialize desc + unlessM (Git.Config.isBare <$> gitRepo) $ + {- Initialize the master branch, so things that expect + - to have it will work, before any files are added. -} + void $ inRepo $ Git.Command.runBool "commit" + [ Param "--quiet" + , Param "--allow-empty" + , Param "-m" + , Param "created repository" + ] getUUID {- Adds a directory to the autostart file. 
-} @@ -310,9 +316,9 @@ canMakeSymlink dir = ifM (doesDirectoryExist dir) ( catchBoolIO $ test dir , canMakeSymlink (parentDir dir) ) - where - test d = do - let link = d "delete.me" - createSymbolicLink link link - removeLink link - return True + where + test d = do + let link = d "delete.me" + createSymbolicLink link link + removeLink link + return True diff --git a/Assistant/WebApp/Configurators/Pairing.hs b/Assistant/WebApp/Configurators/Pairing.hs index dd82a99..c6e9874 100644 --- a/Assistant/WebApp/Configurators/Pairing.hs +++ b/Assistant/WebApp/Configurators/Pairing.hs @@ -14,6 +14,7 @@ import Assistant.Pairing import Assistant.WebApp import Assistant.WebApp.Types import Assistant.WebApp.SideBar +import Assistant.Types.Buddies import Utility.Yesod #ifdef WITH_PAIRING import Assistant.Common @@ -26,6 +27,19 @@ import Utility.Verifiable import Utility.Network import Annex.UUID #endif +#ifdef WITH_XMPP +import Assistant.XMPP +import Assistant.XMPP.Client +import Assistant.XMPP.Buddies +import Assistant.XMPP.Git +import Network.Protocol.XMPP +import Assistant.Types.NetMessager +import Assistant.NetMessager +import Assistant.WebApp.Configurators +import Assistant.WebApp.Configurators.XMPP +#endif +import Utility.UserInfo +import Git import Yesod import Data.Text (Text) @@ -34,49 +48,132 @@ import qualified Data.Text as T import qualified Data.Text.Encoding as T import qualified Data.ByteString.Lazy as B import Data.Char -import System.Posix.User import qualified Control.Exception as E import Control.Concurrent #endif +#ifdef WITH_XMPP +import qualified Data.Set as S +#endif + +getStartXMPPPairR :: Handler RepHtml +#ifdef WITH_XMPP +getStartXMPPPairR = ifM (isJust <$> runAnnex Nothing getXMPPCreds) + ( do + {- Ask buddies to send presence info, to get + - the buddy list populated. -} + liftAssistant $ sendNetMessage QueryPresence + pairPage $ + $(widgetFile "configurators/pairing/xmpp/prompt") + , redirect XMPPForPairingR -- go get XMPP configured, then come back + ) +#else +getStartXMPPPairR = noXMPPPairing + +noXMPPPairing :: Handler RepHtml +noXMPPPairing = noPairing "XMPP" +#endif + +{- Does pairing with an XMPP buddy, or with other clients sharing an + - XMPP account. -} +getRunningXMPPPairR :: BuddyKey -> Handler RepHtml +#ifdef WITH_XMPP +getRunningXMPPPairR bid = do + buddy <- liftAssistant $ getBuddy bid <<~ buddyList + go $ S.toList . buddyAssistants <$> buddy + where + go (Just (clients@((Client exemplar):_))) = do + creds <- runAnnex Nothing getXMPPCreds + let ourjid = fromJust $ parseJID =<< xmppJID <$> creds + let samejid = baseJID ourjid == baseJID exemplar + liftAssistant $ do + u <- liftAnnex getUUID + forM_ clients $ \(Client c) -> sendNetMessage $ + PairingNotification PairReq (formatJID c) u + xmppPairEnd True $ if samejid then Nothing else Just exemplar + -- A buddy could have logged out, or the XMPP client restarted, + -- and there be no clients to message; handle unforseen by going back. + go _ = redirect StartXMPPPairR +#else +getRunningXMPPPairR _ = noXMPPPairing +#endif -{- Starts sending out pair requests. -} -getStartPairR :: Handler RepHtml +{- Starts local pairing. 
-} +getStartLocalPairR :: Handler RepHtml #ifdef WITH_PAIRING -getStartPairR = promptSecret Nothing $ startPairing PairReq noop pairingAlert Nothing +getStartLocalPairR = promptSecret Nothing $ + startLocalPairing PairReq noop pairingAlert Nothing #else -getStartPairR = noPairing +getStartLocalPairR = noLocalPairing + +noLocalPairing :: Handler RepHtml +noLocalPairing = noPairing "local" #endif -{- Runs on the system that responds to a pair request; sets up the ssh +{- Runs on the system that responds to a local pair request; sets up the ssh - authorized key first so that the originating host can immediately sync - with us. -} -getFinishPairR :: PairMsg -> Handler RepHtml +getFinishLocalPairR :: PairMsg -> Handler RepHtml #ifdef WITH_PAIRING -getFinishPairR msg = promptSecret (Just msg) $ \_ secret -> do - liftIO $ setup - startPairing PairAck cleanup alert uuid "" secret - where - alert = pairRequestAcknowledgedAlert (pairRepo msg) . Just - setup = setupAuthorizedKeys msg - cleanup = removeAuthorizedKeys False $ - remoteSshPubKey $ pairMsgData msg - uuid = Just $ pairUUID $ pairMsgData msg +getFinishLocalPairR msg = promptSecret (Just msg) $ \_ secret -> do + repodir <- lift $ repoPath <$> runAnnex undefined gitRepo + liftIO $ setup repodir + startLocalPairing PairAck (cleanup repodir) alert uuid "" secret + where + alert = pairRequestAcknowledgedAlert (pairRepo msg) . Just + setup repodir = setupAuthorizedKeys msg repodir + cleanup repodir = removeAuthorizedKeys False repodir $ + remoteSshPubKey $ pairMsgData msg + uuid = Just $ pairUUID $ pairMsgData msg #else -getFinishPairR _ = noPairing +getFinishLocalPairR _ = noLocalPairing +#endif + +getConfirmXMPPPairR :: PairKey -> Handler RepHtml +#ifdef WITH_XMPP +getConfirmXMPPPairR pairkey@(PairKey _ t) = case parseJID t of + Nothing -> error "bad JID" + Just theirjid -> pairPage $ do + let name = buddyName theirjid + $(widgetFile "configurators/pairing/xmpp/confirm") +#else +getConfirmXMPPPairR _ = noXMPPPairing +#endif + +getFinishXMPPPairR :: PairKey -> Handler RepHtml +#ifdef WITH_XMPP +getFinishXMPPPairR (PairKey theiruuid t) = case parseJID t of + Nothing -> error "bad JID" + Just theirjid -> do + liftAssistant $ do + selfuuid <- liftAnnex getUUID + sendNetMessage $ + PairingNotification PairAck (formatJID theirjid) selfuuid + finishXMPPPairing theirjid theiruuid + xmppPairEnd False $ Just theirjid +#else +getFinishXMPPPairR _ = noXMPPPairing +#endif + +#ifdef WITH_XMPP +xmppPairEnd :: Bool -> Maybe JID -> Handler RepHtml +xmppPairEnd inprogress theirjid = pairPage $ do + let friend = buddyName <$> theirjid + cloudrepolist <- lift $ repoList True False False + $(widgetFile "configurators/pairing/xmpp/end") #endif -getInprogressPairR :: SecretReminder -> Handler RepHtml +getRunningLocalPairR :: SecretReminder -> Handler RepHtml #ifdef WITH_PAIRING -getInprogressPairR s = pairPage $ do +getRunningLocalPairR s = pairPage $ do let secret = fromSecretReminder s - $(widgetFile "configurators/pairing/inprogress") + $(widgetFile "configurators/pairing/local/inprogress") #else -getInprogressPairR _ = noPairing +getRunningLocalPairR _ = noLocalPairing #endif #ifdef WITH_PAIRING -{- Starts pairing, at either the PairReq (initiating host) or +{- Starts local pairing, at either the PairReq (initiating host) or - PairAck (responding host) stage. - - Displays an alert, and starts a thread sending the pairing message, @@ -85,48 +182,49 @@ getInprogressPairR _ = noPairing - - Redirects to the pairing in progress page. 
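
The XMPP pairing handlers above drive the same PairStage protocol as local pairing; an illustrative trace of the flow between two assistants, where alice and bob are hypothetical accounts:

    -- alice, RunningXMPPPairR: sends PairingNotification PairReq <bob's client JID> <alice's UUID>
    --                          to every client in bob's buddyAssistants set
    -- bob,   ConfirmXMPPPairR: shows the confirmation page for alice's request
    -- bob,   FinishXMPPPairR:  sends PairingNotification PairAck <alice's client JID> <bob's UUID>
    --                          and runs finishXMPPPairing to set up the xmpp:: remote
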
-} -startPairing :: PairStage -> IO () -> (AlertButton -> Alert) -> Maybe UUID -> Text -> Secret -> Widget -startPairing stage oncancel alert muuid displaysecret secret = do - dstatus <- daemonStatus <$> lift getYesod +startLocalPairing :: PairStage -> IO () -> (AlertButton -> Alert) -> Maybe UUID -> Text -> Secret -> Widget +startLocalPairing stage oncancel alert muuid displaysecret secret = do urlrender <- lift getUrlRender reldir <- fromJust . relDir <$> lift getYesod + sendrequests <- lift $ liftAssistant $ asIO2 $ mksendrequests urlrender {- Generating a ssh key pair can take a while, so do it in the - background. -} - void $ liftIO $ forkIO $ do - keypair <- genSshKeyPair - pairdata <- PairData + thread <- lift $ liftAssistant $ asIO $ do + keypair <- liftIO $ genSshKeyPair + pairdata <- liftIO $ PairData <$> getHostname - <*> getUserName + <*> myUserName <*> pure reldir <*> pure (sshPubKey keypair) <*> (maybe genUUID return muuid) let sender = multicastPairMsg Nothing secret pairdata let pip = PairingInProgress secret Nothing keypair pairdata stage - startSending dstatus pip stage $ sendrequests sender dstatus urlrender - - lift $ redirect $ InprogressPairR $ toSecretReminder displaysecret - where - {- Sends pairing messages until the thread is killed, - - and shows an activity alert while doing it. - - - - The cancel button returns the user to the HomeR. This is - - not ideal, but they have to be sent somewhere, and could - - have been on a page specific to the in-process pairing - - that just stopped, so can't go back there. - -} - sendrequests sender dstatus urlrender _stage = do - tid <- myThreadId - let selfdestruct = AlertButton - { buttonLabel = "Cancel" - , buttonUrl = urlrender HomeR - , buttonAction = Just $ const $ do - oncancel - killThread tid - } - alertDuring dstatus (alert selfdestruct) $ do - _ <- E.try (sender stage) :: IO (Either E.SomeException ()) - return () + startSending pip stage $ sendrequests sender + void $ liftIO $ forkIO thread + + lift $ redirect $ RunningLocalPairR $ toSecretReminder displaysecret + where + {- Sends pairing messages until the thread is killed, + - and shows an activity alert while doing it. + - + - The cancel button returns the user to the HomeR. This is + - not ideal, but they have to be sent somewhere, and could + - have been on a page specific to the in-process pairing + - that just stopped, so can't go back there. + -} + mksendrequests urlrender sender _stage = do + tid <- liftIO myThreadId + let selfdestruct = AlertButton + { buttonLabel = "Cancel" + , buttonUrl = urlrender HomeR + , buttonAction = Just $ const $ do + oncancel + killThread tid + } + alertDuring (alert selfdestruct) $ liftIO $ do + _ <- E.try (sender stage) :: IO (Either E.SomeException ()) + return () data InputSecret = InputSecret { secretText :: Maybe Text } @@ -152,18 +250,18 @@ promptSecret msg cont = pairPage $ do else showform form enctype $ Just "That's not the right secret phrase." _ -> showform form enctype Nothing - where - showform form enctype mproblem = do - let start = isNothing msg - let badphrase = isJust mproblem - let problem = fromMaybe "" mproblem - let (username, hostname) = maybe ("", "") - (\(_, v, a) -> (T.pack $ remoteUserName v, T.pack $ fromMaybe (showAddr a) (remoteHostName v))) - (verifiableVal . 
fromPairMsg <$> msg) - u <- T.pack <$> liftIO getUserName - let sameusername = username == u - let authtoken = webAppFormAuthToken - $(widgetFile "configurators/pairing/prompt") + where + showform form enctype mproblem = do + let start = isNothing msg + let badphrase = isJust mproblem + let problem = fromMaybe "" mproblem + let (username, hostname) = maybe ("", "") + (\(_, v, a) -> (T.pack $ remoteUserName v, T.pack $ fromMaybe (showAddr a) (remoteHostName v))) + (verifiableVal . fromPairMsg <$> msg) + u <- T.pack <$> liftIO myUserName + let sameusername = username == u + let authtoken = webAppFormAuthToken + $(widgetFile "configurators/pairing/local/prompt") {- This counts unicode characters as more than one character, - but that's ok; they *do* provide additional entropy. -} @@ -177,15 +275,6 @@ secretProblem s toSecret :: Text -> Secret toSecret s = B.fromChunks [T.encodeUtf8 $ T.toLower $ T.filter isAlphaNum s] -getUserName :: IO String -getUserName = userName <$> (getUserEntryForID =<< getEffectiveUserID) - -pairPage :: Widget -> Handler RepHtml -pairPage w = bootstrap (Just Config) $ do - sideBarDisplay - setTitle "Pairing" - w - {- From Dickens -} sampleQuote :: Text sampleQuote = T.unwords @@ -197,8 +286,14 @@ sampleQuote = T.unwords #else -noPairing :: Handler RepHtml -noPairing = pairPage $ - $(widgetFile "configurators/pairing/disabled") - #endif + +pairPage :: Widget -> Handler RepHtml +pairPage w = bootstrap (Just Config) $ do + sideBarDisplay + setTitle "Pairing" + w + +noPairing :: Text -> Handler RepHtml +noPairing pairingtype = pairPage $ + $(widgetFile "configurators/pairing/disabled") diff --git a/Assistant/WebApp/Configurators/S3.hs b/Assistant/WebApp/Configurators/S3.hs index cd019be..42355ea 100644 --- a/Assistant/WebApp/Configurators/S3.hs +++ b/Assistant/WebApp/Configurators/S3.hs @@ -15,7 +15,6 @@ import Assistant.Sync import Assistant.WebApp import Assistant.WebApp.Types import Assistant.WebApp.SideBar -import Assistant.ThreadedMonad import Utility.Yesod import qualified Remote.S3 as S3 import Logs.Remote @@ -63,12 +62,12 @@ s3InputAForm = S3Input <*> areq textField "Datacenter" (Just "US") <*> areq (selectFieldList storageclasses) "Storage class" (Just StandardRedundancy) <*> areq textField "Repository name" (Just "S3") - where - storageclasses :: [(Text, StorageClass)] - storageclasses = - [ ("Standard redundancy", StandardRedundancy) - , ("Reduced redundancy (costs less)", ReducedRedundancy) - ] + where + storageclasses :: [(Text, StorageClass)] + storageclasses = + [ ("Standard redundancy", StandardRedundancy) + , ("Reduced redundancy (costs less)", ReducedRedundancy) + ] s3CredsAForm :: AForm WebApp WebApp S3Creds s3CredsAForm = S3Creds @@ -89,12 +88,12 @@ getAddS3R = s3Configurator $ do , ("storageclass", show $ storageClass s3input) ] _ -> showform form enctype - where - showform form enctype = do - let authtoken = webAppFormAuthToken - $(widgetFile "configurators/adds3") - setgroup r = runAnnex () $ - setStandardGroup (Remote.uuid r) TransferGroup + where + showform form enctype = do + let authtoken = webAppFormAuthToken + $(widgetFile "configurators/adds3") + setgroup r = runAnnex () $ + setStandardGroup (Remote.uuid r) TransferGroup getEnableS3R :: UUID -> Handler RepHtml getEnableS3R uuid = s3Configurator $ do @@ -107,22 +106,20 @@ getEnableS3R uuid = s3Configurator $ do fromJust $ M.lookup uuid m makeS3Remote s3creds name (const noop) M.empty _ -> showform form enctype - where - showform form enctype = do - let authtoken = webAppFormAuthToken - 
description <- lift $ runAnnex "" $ - T.pack . concat <$> Remote.prettyListUUIDs [uuid] - $(widgetFile "configurators/enables3") + where + showform form enctype = do + let authtoken = webAppFormAuthToken + description <- lift $ runAnnex "" $ + T.pack . concat <$> Remote.prettyListUUIDs [uuid] + $(widgetFile "configurators/enables3") makeS3Remote :: S3Creds -> String -> (Remote -> Handler ()) -> RemoteConfig -> Handler () makeS3Remote (S3Creds ak sk) name setup config = do - webapp <- getYesod - let st = fromJust $ threadState webapp remotename <- runAnnex name $ fromRepo $ uniqueRemoteName name 0 liftIO $ S3.s3SetCredsEnv ( T.unpack ak, T.unpack sk) - r <- liftIO $ runThreadState st $ addRemote $ do + r <- liftAssistant $ liftAnnex $ addRemote $ do makeSpecialRemote name S3.remote config return remotename setup r - liftIO $ syncNewRemote st (daemonStatus webapp) (scanRemotes webapp) r - redirect $ EditNewRepositoryR $ Remote.uuid r + liftAssistant $ syncNewRemote r + redirect $ EditNewCloudRepositoryR $ Remote.uuid r diff --git a/Assistant/WebApp/Configurators/Ssh.hs b/Assistant/WebApp/Configurators/Ssh.hs index 3edc5a5..7353f61 100644 --- a/Assistant/WebApp/Configurators/Ssh.hs +++ b/Assistant/WebApp/Configurators/Ssh.hs @@ -21,13 +21,13 @@ import Logs.Remote import Remote import Logs.PreferredContent import Types.StandardGroups +import Utility.UserInfo import Yesod import Data.Text (Text) import qualified Data.Text as T import qualified Data.Map as M import Network.Socket -import System.Posix.User sshConfigurator :: Widget -> Handler RepHtml sshConfigurator a = bootstrap (Just Config) $ do @@ -61,25 +61,25 @@ sshInputAForm def = SshInput <$> aopt check_hostname "Host name" (Just $ hostname def) <*> aopt check_username "User name" (Just $ username def) <*> aopt textField "Directory" (Just $ Just $ fromMaybe (T.pack gitAnnexAssistantDefaultDir) $ directory def) - where - check_hostname = checkM (liftIO . checkdns) textField - checkdns t = do - let h = T.unpack t - r <- catchMaybeIO $ getAddrInfo canonname (Just h) Nothing - return $ case catMaybes . map addrCanonName <$> r of - -- canonicalize input hostname if it had no dot - Just (fullname:_) - | '.' `elem` h -> Right t - | otherwise -> Right $ T.pack fullname - Just [] -> Right t - Nothing -> Left bad_hostname - canonname = Just $ defaultHints { addrFlags = [AI_CANONNAME] } + where + check_hostname = checkM (liftIO . checkdns) textField + checkdns t = do + let h = T.unpack t + r <- catchMaybeIO $ getAddrInfo canonname (Just h) Nothing + return $ case catMaybes . map addrCanonName <$> r of + -- canonicalize input hostname if it had no dot + Just (fullname:_) + | '.' `elem` h -> Right t + | otherwise -> Right $ T.pack fullname + Just [] -> Right t + Nothing -> Left bad_hostname + canonname = Just $ defaultHints { addrFlags = [AI_CANONNAME] } - check_username = checkBool (all (`notElem` "/:@ \t") . T.unpack) - bad_username textField + check_username = checkBool (all (`notElem` "/:@ \t") . T.unpack) + bad_username textField - bad_hostname = "cannot resolve host name" :: Text - bad_username = "bad user name" :: Text + bad_hostname = "cannot resolve host name" :: Text + bad_username = "bad user name" :: Text data ServerStatus = UntestedServer @@ -96,8 +96,7 @@ usable UsableSshInput = True getAddSshR :: Handler RepHtml getAddSshR = sshConfigurator $ do - u <- liftIO $ T.pack . 
userName - <$> (getUserEntryForID =<< getEffectiveUserID) + u <- liftIO $ T.pack <$> myUserName ((result, form), enctype) <- lift $ runFormGet $ renderBootstrap $ sshInputAForm $ SshInput Nothing (Just u) Nothing @@ -108,10 +107,10 @@ getAddSshR = sshConfigurator $ do Left status -> showform form enctype status Right sshdata -> lift $ redirect $ ConfirmSshR sshdata _ -> showform form enctype UntestedServer - where - showform form enctype status = do - let authtoken = webAppFormAuthToken - $(widgetFile "configurators/ssh/add") + where + showform form enctype status = do + let authtoken = webAppFormAuthToken + $(widgetFile "configurators/ssh/add") {- To enable an existing rsync special remote, parse the SshInput from - its rsyncurl, and display a form whose only real purpose is to check @@ -123,31 +122,31 @@ getAddSshR = sshConfigurator $ do -} getEnableRsyncR :: UUID -> Handler RepHtml getEnableRsyncR u = do - m <- runAnnex M.empty readRemoteLog - case parseSshRsyncUrl =<< M.lookup "rsyncurl" =<< M.lookup u m of - Nothing -> redirect AddSshR - Just sshinput -> sshConfigurator $ do + m <- fromMaybe M.empty . M.lookup u <$> runAnnex M.empty readRemoteLog + case (parseSshRsyncUrl =<< M.lookup "rsyncurl" m, M.lookup "name" m) of + (Just sshinput, Just reponame) -> sshConfigurator $ do ((result, form), enctype) <- lift $ runFormGet $ renderBootstrap $ sshInputAForm sshinput case result of FormSuccess sshinput' | isRsyncNet (hostname sshinput') -> - void $ lift $ makeRsyncNet sshinput' (const noop) + void $ lift $ makeRsyncNet sshinput' reponame (const noop) | otherwise -> do s <- liftIO $ testServer sshinput' case s of Left status -> showform form enctype status Right sshdata -> enable sshdata + { sshRepoName = reponame } _ -> showform form enctype UntestedServer - where - showform form enctype status = do - description <- lift $ runAnnex "" $ - T.pack . concat <$> prettyListUUIDs [u] - let authtoken = webAppFormAuthToken - $(widgetFile "configurators/ssh/enable") - enable sshdata = - lift $ redirect $ ConfirmSshR $ - sshdata { rsyncOnly = True } + _ -> redirect AddSshR + where + showform form enctype status = do + description <- lift $ runAnnex "" $ + T.pack . concat <$> prettyListUUIDs [u] + let authtoken = webAppFormAuthToken + $(widgetFile "configurators/ssh/enable") + enable sshdata = lift $ redirect $ ConfirmSshR $ + sshdata { rsyncOnly = True } {- Converts a rsyncurl value to a SshInput. But only if it's a ssh rsync - url; rsync:// urls or bare path names are not supported. @@ -164,12 +163,12 @@ parseSshRsyncUrl u , username = if null user then Nothing else val user , directory = val dir } - where - val = Just . T.pack - (userhost, dir) = separate (== ':') u - (user, host) = if '@' `elem` userhost - then separate (== '@') userhost - else (userhost, "") + where + val = Just . T.pack + (userhost, dir) = separate (== ':') u + (user, host) = if '@' `elem` userhost + then separate (== '@') userhost + else (userhost, "") {- Test if we can ssh into the server. - @@ -179,7 +178,7 @@ parseSshRsyncUrl u - a special ssh key will need to be generated just for this server. - - Once logged into the server, probe to see if git-annex-shell is - - available, or rsync. Note that on OSX, ~/.ssh/git-annex-shell may be + - available, or rsync. Note that, ~/.ssh/git-annex-shell may be - present, while git-annex-shell is not in PATH. 
-} testServer :: SshInput -> IO (Either ServerStatus SshData) @@ -194,44 +193,43 @@ testServer sshinput@(SshInput { hostname = Just hn }) = do if usable status' then ret status' True else return $ Left status' - where - ret status needspubkey = return $ Right $ - (mkSshData sshinput) - { needsPubKey = needspubkey - , rsyncOnly = status == UsableRsyncServer - } - probe extraopts = do - let remotecommand = join ";" - [ report "loggedin" - , checkcommand "git-annex-shell" - , checkcommand "rsync" - , checkcommand osx_shim - ] - knownhost <- knownHost hn - let sshopts = filter (not . null) $ extraopts ++ - {- If this is an already known host, let - - ssh check it as usual. - - Otherwise, trust the host key. -} - [ if knownhost then "" else sshOpt "StrictHostKeyChecking" "no" - , "-n" -- don't read from stdin - , genSshHost (fromJust $ hostname sshinput) (username sshinput) - , remotecommand - ] - parsetranscript . fst <$> sshTranscript sshopts "" - parsetranscript s - | reported "git-annex-shell" = UsableSshInput - | reported osx_shim = UsableSshInput - | reported "rsync" = UsableRsyncServer - | reported "loggedin" = UnusableServer - "Neither rsync nor git-annex are installed on the server. Perhaps you should go install them?" - | otherwise = UnusableServer $ T.pack $ - "Failed to ssh to the server. Transcript: " ++ s - where - reported r = token r `isInfixOf` s - checkcommand c = "if which " ++ c ++ "; then " ++ report c ++ "; fi" - token r = "git-annex-probe " ++ r - report r = "echo " ++ token r - osx_shim = "~/.ssh/git-annex-shell" + where + ret status needspubkey = return $ Right $ (mkSshData sshinput) + { needsPubKey = needspubkey + , rsyncOnly = status == UsableRsyncServer + } + probe extraopts = do + let remotecommand = join ";" + [ report "loggedin" + , checkcommand "git-annex-shell" + , checkcommand "rsync" + , checkcommand shim + ] + knownhost <- knownHost hn + let sshopts = filter (not . null) $ extraopts ++ + {- If this is an already known host, let + - ssh check it as usual. + - Otherwise, trust the host key. -} + [ if knownhost then "" else sshOpt "StrictHostKeyChecking" "no" + , "-n" -- don't read from stdin + , genSshHost (fromJust $ hostname sshinput) (username sshinput) + , remotecommand + ] + parsetranscript . fst <$> sshTranscript sshopts "" + parsetranscript s + | reported "git-annex-shell" = UsableSshInput + | reported shim = UsableSshInput + | reported "rsync" = UsableRsyncServer + | reported "loggedin" = UnusableServer + "Neither rsync nor git-annex are installed on the server. Perhaps you should go install them?" + | otherwise = UnusableServer $ T.pack $ + "Failed to ssh to the server. Transcript: " ++ s + where + reported r = token r `isInfixOf` s + checkcommand c = "if which " ++ c ++ "; then " ++ report c ++ "; fi" + token r = "git-annex-probe " ++ r + report r = "echo " ++ token r + shim = "~/.ssh/git-annex-shell" {- Runs a ssh command; if it fails shows the user the transcript, - and if it succeeds, runs an action. 
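
To make the probe and transcript parsing above concrete, this is roughly what runs on the remote host and how parsetranscript classifies the output (an illustrative trace, not code from the patch):

    -- remote command (fragments joined with ";"):
    --   echo git-annex-probe loggedin
    --   if which git-annex-shell; then echo git-annex-probe git-annex-shell; fi
    --   if which rsync; then echo git-annex-probe rsync; fi
    --   if which ~/.ssh/git-annex-shell; then echo git-annex-probe ~/.ssh/git-annex-shell; fi
    -- transcript mentions "git-annex-probe git-annex-shell" (or the shim) -> UsableSshInput
    -- only "git-annex-probe rsync"                                        -> UsableRsyncServer
    -- only "git-annex-probe loggedin"                                     -> UnusableServer, suggesting installation
    -- none of the tokens                                                  -> UnusableServer, showing the transcript
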
-} @@ -269,29 +267,24 @@ makeSsh' :: Bool -> (Remote -> Handler ()) -> SshData -> Maybe SshKeyPair -> Han makeSsh' rsync setup sshdata keypair = sshSetup [sshhost, remoteCommand] "" $ makeSshRepo rsync setup sshdata - where - sshhost = genSshHost (sshHostName sshdata) (sshUserName sshdata) - remotedir = T.unpack $ sshDirectory sshdata - remoteCommand = join "&&" $ catMaybes - [ Just $ "mkdir -p " ++ shellEscape remotedir - , Just $ "cd " ++ shellEscape remotedir - , if rsync then Nothing else Just "git init --bare --shared" - , if rsync then Nothing else Just "git annex init" - , if needsPubKey sshdata - then addAuthorizedKeysCommand (rsyncOnly sshdata) . sshPubKey <$> keypair - else Nothing - ] + where + sshhost = genSshHost (sshHostName sshdata) (sshUserName sshdata) + remotedir = T.unpack $ sshDirectory sshdata + remoteCommand = join "&&" $ catMaybes + [ Just $ "mkdir -p " ++ shellEscape remotedir + , Just $ "cd " ++ shellEscape remotedir + , if rsync then Nothing else Just "git init --bare --shared" + , if rsync then Nothing else Just "git annex init" + , if needsPubKey sshdata + then addAuthorizedKeysCommand (rsyncOnly sshdata) remotedir . sshPubKey <$> keypair + else Nothing + ] makeSshRepo :: Bool -> (Remote -> Handler ()) -> SshData -> Handler RepHtml makeSshRepo forcersync setup sshdata = do - webapp <- getYesod - r <- liftIO $ makeSshRemote - (fromJust $ threadState webapp) - (daemonStatus webapp) - (scanRemotes webapp) - forcersync sshdata + r <- liftAssistant $ makeSshRemote forcersync sshdata setup r - redirect $ EditNewRepositoryR $ Remote.uuid r + redirect $ EditNewCloudRepositoryR $ Remote.uuid r getAddRsyncNetR :: Handler RepHtml getAddRsyncNetR = do @@ -305,20 +298,22 @@ getAddRsyncNetR = do $(widgetFile "configurators/addrsync.net") case result of FormSuccess sshinput - | isRsyncNet (hostname sshinput) -> - makeRsyncNet sshinput setupGroup + | isRsyncNet (hostname sshinput) -> do + let reponame = genSshRepoName "rsync.net" + (maybe "" T.unpack $ directory sshinput) + makeRsyncNet sshinput reponame setupGroup | otherwise -> showform $ UnusableServer "That is not a rsync.net host name." _ -> showform UntestedServer -makeRsyncNet :: SshInput -> (Remote -> Handler ()) -> Handler RepHtml -makeRsyncNet sshinput setup = do +makeRsyncNet :: SshInput -> String -> (Remote -> Handler ()) -> Handler RepHtml +makeRsyncNet sshinput reponame setup = do knownhost <- liftIO $ maybe (return False) knownHost (hostname sshinput) keypair <- liftIO $ genSshKeyPair sshdata <- liftIO $ setupSshKeyPair keypair $ (mkSshData sshinput) - { sshRepoName = "rsync.net" + { sshRepoName = reponame , needsPubKey = True , rsyncOnly = True } diff --git a/Assistant/WebApp/Configurators/XMPP.hs b/Assistant/WebApp/Configurators/XMPP.hs new file mode 100644 index 0000000..9d41a85 --- /dev/null +++ b/Assistant/WebApp/Configurators/XMPP.hs @@ -0,0 +1,169 @@ +{- git-annex assistant XMPP configuration + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU AGPL version 3 or higher. 
+ -} + +{-# LANGUAGE TypeFamilies, QuasiQuotes, MultiParamTypeClasses, TemplateHaskell, OverloadedStrings #-} +{-# LANGUAGE CPP #-} + +module Assistant.WebApp.Configurators.XMPP where + +import Assistant.WebApp +import Assistant.WebApp.Types +import Assistant.WebApp.Notifications +import Assistant.WebApp.SideBar +import Utility.Yesod +import Utility.NotificationBroadcaster +#ifdef WITH_XMPP +import Assistant.Common +import Assistant.XMPP.Client +import Assistant.XMPP.Buddies +import Assistant.Types.Buddies +import Assistant.NetMessager +import Assistant.Alert +import Assistant.DaemonStatus +import Utility.SRV +#endif + +import Yesod +#ifdef WITH_XMPP +import Network +import Network.Protocol.XMPP +import Data.Text (Text) +import qualified Data.Text as T +#endif + +{- Displays an alert suggesting to configure XMPP, with a button. -} +xmppNeeded :: Handler () +#ifdef WITH_XMPP +xmppNeeded = whenM (isNothing <$> runAnnex Nothing getXMPPCreds) $ do + urlrender <- getUrlRender + void $ liftAssistant $ do + close <- asIO1 removeAlert + addAlert $ xmppNeededAlert $ AlertButton + { buttonLabel = "Configure a Jabber account" + , buttonUrl = urlrender XMPPR + , buttonAction = Just close + } +#else +xmppNeeded = return () +#endif + +getXMPPR :: Handler RepHtml +#ifdef WITH_XMPP +getXMPPR = getXMPPR' ConfigR +#else +getXMPPR = xmppPage $ + $(widgetFile "configurators/xmpp/disabled") +#endif + +getXMPPForPairingR :: Handler RepHtml +#ifdef WITH_XMPP +getXMPPForPairingR = getXMPPR' StartXMPPPairR +#else +getXMPPForPairingR = xmppPage $ + $(widgetFile "configurators/xmpp/disabled") +#endif + +#ifdef WITH_XMPP +getXMPPR' :: Route WebApp -> Handler RepHtml +getXMPPR' redirto = xmppPage $ do + ((result, form), enctype) <- lift $ do + oldcreds <- runAnnex Nothing getXMPPCreds + runFormGet $ renderBootstrap $ xmppAForm $ + creds2Form <$> oldcreds + let showform problem = do + let authtoken = webAppFormAuthToken + $(widgetFile "configurators/xmpp") + case result of + FormSuccess f -> maybe (showform True) (lift . storecreds) + =<< liftIO (validateForm f) + _ -> showform False + where + storecreds creds = do + void $ runAnnex undefined $ setXMPPCreds creds + liftAssistant notifyNetMessagerRestart + redirect redirto +#endif + +{- Called by client to get a list of buddies. + - + - Returns a div, which will be inserted into the calling page. + -} +getBuddyListR :: NotificationId -> Handler RepHtml +getBuddyListR nid = do + waitNotifier getBuddyListBroadcaster nid + + page <- widgetToPageContent $ buddyListDisplay + hamletToRepHtml $ [hamlet|^{pageBody page}|] + +buddyListDisplay :: Widget +buddyListDisplay = do + autoUpdate ident NotifierBuddyListR (10 :: Int) (10 :: Int) +#ifdef WITH_XMPP + buddies <- lift $ liftAssistant $ do + rs <- filter isXMPPRemote . syncGitRemotes <$> getDaemonStatus + let pairedwith = catMaybes $ map (parseJID . getXMPPClientID) rs + catMaybes . map (buddySummary pairedwith) + <$> (getBuddyList <<~ buddyList) + $(widgetFile "configurators/xmpp/buddylist") +#endif + where + ident = "buddylist" + +#ifdef WITH_XMPP + +data XMPPForm = XMPPForm + { formJID :: Text + , formPassword :: Text } + +creds2Form :: XMPPCreds -> XMPPForm +creds2Form c = XMPPForm (xmppJID c) (xmppPassword c) + +xmppAForm :: (Maybe XMPPForm) -> AForm WebApp WebApp XMPPForm +xmppAForm def = XMPPForm + <$> areq jidField "Jabber address" (formJID <$> def) + <*> areq passwordField "Password" Nothing + +jidField :: Field WebApp WebApp Text +jidField = checkBool (isJust . 
parseJID) bad textField + where + bad :: Text + bad = "This should look like an email address.." + +validateForm :: XMPPForm -> IO (Maybe XMPPCreds) +validateForm f = do + let jid = fromMaybe (error "bad JID") $ parseJID (formJID f) + let domain = T.unpack $ strDomain $ jidDomain jid + hostports <- lookupSRV $ mkSRVTcp "xmpp-client" domain + let username = fromMaybe "" (strNode <$> jidNode jid) + case hostports of + ((h, PortNumber p):_) -> testXMPP $ XMPPCreds + { xmppUsername = username + , xmppPassword = formPassword f + , xmppHostname = h + , xmppPort = fromIntegral p + , xmppJID = formJID f + } + _ -> testXMPP $ XMPPCreds + { xmppUsername = username + , xmppPassword = formPassword f + , xmppHostname = T.unpack $ strDomain $ jidDomain jid + , xmppPort = 5222 + , xmppJID = formJID f + } + +testXMPP :: XMPPCreds -> IO (Maybe XMPPCreds) +testXMPP creds = either (const $ return Nothing) + (const $ return $ Just creds) + =<< connectXMPP creds (const noop) + +#endif + +xmppPage :: Widget -> Handler RepHtml +xmppPage w = bootstrap (Just Config) $ do + sideBarDisplay + setTitle "Jabber" + w diff --git a/Assistant/WebApp/DashBoard.hs b/Assistant/WebApp/DashBoard.hs index 897fddf..44e6461 100644 --- a/Assistant/WebApp/DashBoard.hs +++ b/Assistant/WebApp/DashBoard.hs @@ -16,7 +16,6 @@ import Assistant.WebApp.Utility import Assistant.WebApp.SideBar import Assistant.WebApp.Notifications import Assistant.WebApp.Configurators -import Assistant.DaemonStatus import Assistant.TransferQueue import Utility.NotificationBroadcaster import Utility.Yesod @@ -39,7 +38,7 @@ transfersDisplay :: Bool -> Widget transfersDisplay warnNoScript = do webapp <- lift getYesod current <- lift $ M.toList <$> getCurrentTransfers - queued <- liftIO $ getTransferQueue $ transferQueue webapp + queued <- lift $ liftAssistant getTransferQueue autoUpdate ident NotifierTransfersR (10 :: Int) (10 :: Int) let transfers = simplifyTransfers $ current ++ queued if null transfers @@ -48,10 +47,10 @@ transfersDisplay warnNoScript = do , $(widgetFile "dashboard/transfers") ) else $(widgetFile "dashboard/transfers") - where - ident = "transfers" - isrunning info = not $ - transferPaused info || isNothing (startedTime info) + where + ident = "transfers" + isrunning info = not $ + transferPaused info || isNothing (startedTime info) {- Simplifies a list of transfers, avoiding display of redundant - equivilant transfers. -} @@ -72,7 +71,7 @@ simplifyTransfers (v@(t1, _):r@((t2, _):l)) -} getTransfersR :: NotificationId -> Handler RepHtml getTransfersR nid = do - waitNotifier transferNotifier nid + waitNotifier getTransferBroadcaster nid page <- widgetToPageContent $ transfersDisplay False hamletToRepHtml $ [hamlet|^{pageBody page}|] @@ -137,11 +136,11 @@ openFileBrowser = do void $ redirectUltDest HomeR return False ) - where + where #ifdef darwin_HOST_OS - cmd = "open" + cmd = "open" #else - cmd = "xdg-open" + cmd = "xdg-open" #endif {- Transfer controls. The GET is done in noscript mode and redirects back diff --git a/Assistant/WebApp/Notifications.hs b/Assistant/WebApp/Notifications.hs index 7e71ee6..c841049 100644 --- a/Assistant/WebApp/Notifications.hs +++ b/Assistant/WebApp/Notifications.hs @@ -13,6 +13,7 @@ import Assistant.Common import Assistant.WebApp import Assistant.WebApp.Types import Assistant.DaemonStatus +import Assistant.Types.Buddies import Utility.NotificationBroadcaster import Utility.Yesod @@ -41,9 +42,9 @@ autoUpdate ident geturl ms_delay ms_startdelay = do - of NotificationIds when noscript pages are loaded. 
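
A sketch of what validateForm above does for a hypothetical account joey@example.com: the domain part of the JID drives an SRV lookup, and only when that yields nothing does it fall back to the bare domain on the standard client port:

    -- SRV query for service "xmpp-client" at "example.com"
    --   (presumably _xmpp-client._tcp.example.com)
    -- record found: connect to the advertised host and port, username "joey"
    -- no record:    connect to example.com, port 5222, username "joey"
    -- either way, testXMPP only keeps the credentials if connectXMPP succeeds
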
This constructs a - notifier url for a given Route and NotificationBroadcaster. -} -notifierUrl :: (NotificationId -> Route WebApp) -> (DaemonStatus -> NotificationBroadcaster) -> Handler RepPlain -notifierUrl route selector = do - (urlbits, _params) <- renderRoute . route <$> newNotifier selector +notifierUrl :: (NotificationId -> Route WebApp) -> Assistant NotificationBroadcaster -> Handler RepPlain +notifierUrl route broadcaster = do + (urlbits, _params) <- renderRoute . route <$> newNotifier broadcaster webapp <- getYesod return $ RepPlain $ toContent $ T.concat [ "/" @@ -53,7 +54,19 @@ notifierUrl route selector = do ] getNotifierTransfersR :: Handler RepPlain -getNotifierTransfersR = notifierUrl TransfersR transferNotifier +getNotifierTransfersR = notifierUrl TransfersR getTransferBroadcaster getNotifierSideBarR :: Handler RepPlain -getNotifierSideBarR = notifierUrl SideBarR alertNotifier +getNotifierSideBarR = notifierUrl SideBarR getAlertBroadcaster + +getNotifierBuddyListR :: Handler RepPlain +getNotifierBuddyListR = notifierUrl BuddyListR getBuddyListBroadcaster + +getTransferBroadcaster :: Assistant NotificationBroadcaster +getTransferBroadcaster = transferNotifier <$> getDaemonStatus + +getAlertBroadcaster :: Assistant NotificationBroadcaster +getAlertBroadcaster = alertNotifier <$> getDaemonStatus + +getBuddyListBroadcaster :: Assistant NotificationBroadcaster +getBuddyListBroadcaster = getBuddyBroadcaster <$> getAssistant buddyList diff --git a/Assistant/WebApp/OtherRepos.hs b/Assistant/WebApp/OtherRepos.hs index 301c4ce..49dd0df 100644 --- a/Assistant/WebApp/OtherRepos.hs +++ b/Assistant/WebApp/OtherRepos.hs @@ -29,25 +29,23 @@ getSwitchToRepositoryR repo = do liftIO startassistant url <- liftIO geturl redirect url - where - startassistant = do - program <- readProgramFile - void $ forkIO $ void $ createProcess $ - (proc program ["assistant"]) - { cwd = Just repo } - geturl = do - r <- Git.Config.read =<< Git.Construct.fromPath repo - waiturl $ gitAnnexUrlFile r - waiturl urlfile = do - v <- tryIO $ readFile urlfile - case v of - Left _ -> delayed $ waiturl urlfile - Right url -> ifM (listening url) - ( return url - , delayed $ waiturl urlfile - ) - listening url = catchBoolIO $ - fst <$> Url.exists url [] - delayed a = do - threadDelay 100000 -- 1/10th of a second - a + where + startassistant = do + program <- readProgramFile + void $ forkIO $ void $ createProcess $ + (proc program ["assistant"]) { cwd = Just repo } + geturl = do + r <- Git.Config.read =<< Git.Construct.fromPath repo + waiturl $ gitAnnexUrlFile r + waiturl urlfile = do + v <- tryIO $ readFile urlfile + case v of + Left _ -> delayed $ waiturl urlfile + Right url -> ifM (listening url) + ( return url + , delayed $ waiturl urlfile + ) + listening url = catchBoolIO $ fst <$> Url.exists url [] + delayed a = do + threadDelay 100000 -- 1/10th of a second + a diff --git a/Assistant/WebApp/SideBar.hs b/Assistant/WebApp/SideBar.hs index 6c76592..c8ccbed 100644 --- a/Assistant/WebApp/SideBar.hs +++ b/Assistant/WebApp/SideBar.hs @@ -13,8 +13,8 @@ import Assistant.Common import Assistant.WebApp import Assistant.WebApp.Types import Assistant.WebApp.Notifications -import Assistant.DaemonStatus import Assistant.Alert +import Assistant.DaemonStatus import Utility.NotificationBroadcaster import Utility.Yesod @@ -27,28 +27,27 @@ sideBarDisplay :: Widget sideBarDisplay = do let content = do {- Add newest alerts to the sidebar. -} - webapp <- lift getYesod - alertpairs <- M.toList . 
alertMap - <$> liftIO (getDaemonStatus $ daemonStatus webapp) + alertpairs <- lift $ M.toList . alertMap + <$> liftAssistant getDaemonStatus mapM_ renderalert $ take displayAlerts $ reverse $ sortAlertPairs alertpairs let ident = "sidebar" $(widgetFile "sidebar/main") autoUpdate ident NotifierSideBarR (10 :: Int) (10 :: Int) - where - bootstrapclass :: AlertClass -> Text - bootstrapclass Activity = "alert-info" - bootstrapclass Warning = "alert" - bootstrapclass Error = "alert-error" - bootstrapclass Success = "alert-success" - bootstrapclass Message = "alert-info" + where + bootstrapclass :: AlertClass -> Text + bootstrapclass Activity = "alert-info" + bootstrapclass Warning = "alert" + bootstrapclass Error = "alert-error" + bootstrapclass Success = "alert-success" + bootstrapclass Message = "alert-info" - renderalert (aid, alert) = do - let alertid = show aid - let closable = alertClosable alert - let block = alertBlockDisplay alert - let divclass = bootstrapclass $ alertClass alert - $(widgetFile "sidebar/alert") + renderalert (aid, alert) = do + let alertid = show aid + let closable = alertClosable alert + let block = alertBlockDisplay alert + let divclass = bootstrapclass $ alertClass alert + $(widgetFile "sidebar/alert") {- Called by client to get a sidebar display. - @@ -60,7 +59,7 @@ sideBarDisplay = do -} getSideBarR :: NotificationId -> Handler RepHtml getSideBarR nid = do - waitNotifier alertNotifier nid + waitNotifier getAlertBroadcaster nid {- This 0.1 second delay avoids very transient notifications from - being displayed and churning the sidebar unnecesarily. @@ -74,15 +73,12 @@ getSideBarR nid = do {- Called by the client to close an alert. -} getCloseAlert :: AlertId -> Handler () -getCloseAlert i = do - webapp <- getYesod - liftIO $ removeAlert (daemonStatus webapp) i +getCloseAlert = liftAssistant . removeAlert {- When an alert with a button is clicked on, the button takes us here. -} getClickAlert :: AlertId -> Handler () getClickAlert i = do - webapp <- getYesod - m <- alertMap <$> liftIO (getDaemonStatus $ daemonStatus webapp) + m <- alertMap <$> liftAssistant getDaemonStatus case M.lookup i m of Just (Alert { alertButton = Just b }) -> do {- Spawn a thread to run the action while redirecting. -} diff --git a/Assistant/WebApp/Types.hs b/Assistant/WebApp/Types.hs index bc5eb04..b95b683 100644 --- a/Assistant/WebApp/Types.hs +++ b/Assistant/WebApp/Types.hs @@ -12,13 +12,9 @@ module Assistant.WebApp.Types where import Assistant.Common import Assistant.Ssh -import Assistant.ThreadedMonad -import Assistant.DaemonStatus -import Assistant.ScanRemotes -import Assistant.TransferQueue -import Assistant.TransferSlots import Assistant.Alert import Assistant.Pairing +import Assistant.Types.Buddies import Utility.NotificationBroadcaster import Utility.WebApp import Logs.Transfer @@ -33,16 +29,13 @@ publicFiles "static" mkYesodData "WebApp" $(parseRoutesFile "Assistant/WebApp/routes") data WebApp = WebApp - { threadState :: Maybe ThreadState - , daemonStatus :: DaemonStatusHandle - , scanRemotes :: ScanRemoteMap - , transferQueue :: TransferQueue - , transferSlots :: TransferSlots + { assistantData :: AssistantData , secretToken :: Text , relDir :: Maybe FilePath , getStatic :: Static , webAppState :: TMVar WebAppState , postFirstRun :: Maybe (IO String) + , noAnnex :: Bool } instance Yesod WebApp where @@ -52,9 +45,9 @@ instance Yesod WebApp where {- Add the auth token to every url generated, except static subsite - urls (which can show up in Permission Denied pages). 
-} joinPath = insertAuthToken secretToken excludeStatic - where - excludeStatic [] = True - excludeStatic (p:_) = p /= "static" + where + excludeStatic [] = True + excludeStatic (p:_) = p /= "static" makeSessionBackend = webAppSessionBackend jsLoader _ = BottomOfHeadBlocking @@ -96,3 +89,11 @@ instance PathPiece SecretReminder where instance PathPiece UUID where toPathPiece = pack . show fromPathPiece = readish . unpack + +instance PathPiece BuddyKey where + toPathPiece = pack . show + fromPathPiece = readish . unpack + +instance PathPiece PairKey where + toPathPiece = pack . show + fromPathPiece = readish . unpack diff --git a/Assistant/WebApp/Utility.hs b/Assistant/WebApp/Utility.hs index d9fa669..d4d59a9 100644 --- a/Assistant/WebApp/Utility.hs +++ b/Assistant/WebApp/Utility.hs @@ -11,8 +11,8 @@ import Assistant.Common import Assistant.WebApp import Assistant.WebApp.Types import Assistant.DaemonStatus -import Assistant.ThreadedMonad import Assistant.TransferQueue +import Assistant.Types.TransferSlots import Assistant.TransferSlots import Assistant.Sync import qualified Remote @@ -23,7 +23,6 @@ import Logs.Transfer import Locations.UserConfig import qualified Config -import Yesod import qualified Data.Map as M import Control.Concurrent import System.Posix.Signals (signalProcessGroup, sigTERM, sigKILL) @@ -37,108 +36,81 @@ changeSyncable (Just r) True = do syncRemote r changeSyncable (Just r) False = do changeSyncFlag r False - webapp <- getYesod - let dstatus = daemonStatus webapp - let st = fromJust $ threadState webapp - liftIO $ runThreadState st $ updateSyncRemotes dstatus + liftAssistant $ updateSyncRemotes {- Stop all transfers to or from this remote. - XXX Can't stop any ongoing scan, or git syncs. -} - void $ liftIO $ dequeueTransfers (transferQueue webapp) dstatus tofrom + void $ liftAssistant $ dequeueTransfers tofrom mapM_ (cancelTransfer False) =<< filter tofrom . M.keys <$> - liftIO (currentTransfers <$> getDaemonStatus dstatus) - where - tofrom t = transferUUID t == Remote.uuid r + liftAssistant (currentTransfers <$> getDaemonStatus) + where + tofrom t = transferUUID t == Remote.uuid r changeSyncFlag :: Remote -> Bool -> Handler () changeSyncFlag r enabled = runAnnex undefined $ do Config.setConfig key value void $ Remote.remoteListRefresh - where - key = Config.remoteConfig (Remote.repo r) "sync" - value - | enabled = "true" - | otherwise = "false" + where + key = Config.remoteConfig (Remote.repo r) "sync" + value + | enabled = "true" + | otherwise = "false" {- Start syncing remote, using a background thread. -} syncRemote :: Remote -> Handler () -syncRemote remote = do - webapp <- getYesod - liftIO $ syncNewRemote - (fromJust $ threadState webapp) - (daemonStatus webapp) - (scanRemotes webapp) - remote +syncRemote = liftAssistant . syncNewRemote pauseTransfer :: Transfer -> Handler () pauseTransfer = cancelTransfer True cancelTransfer :: Bool -> Transfer -> Handler () cancelTransfer pause t = do - webapp <- getYesod - let dstatus = daemonStatus webapp m <- getCurrentTransfers - liftIO $ do - unless pause $ - {- remove queued transfer -} - void $ dequeueTransfers (transferQueue webapp) dstatus $ - equivilantTransfer t - {- stop running transfer -} - maybe noop (stop dstatus) (M.lookup t m) - where - stop dstatus info = do - {- When there's a thread associated with the - - transfer, it's signaled first, to avoid it - - displaying any alert about the transfer having - - failed when the transfer process is killed. 
-} - maybe noop signalthread $ transferTid info - maybe noop killproc $ transferPid info - if pause - then void $ - alterTransferInfo dstatus t $ \i -> i - { transferPaused = True } - else void $ - removeTransfer dstatus t - signalthread tid - | pause = throwTo tid PauseTransfer - | otherwise = killThread tid - {- In order to stop helper processes like rsync, - - kill the whole process group of the process running the - - transfer. -} - killproc pid = do - g <- getProcessGroupIDOf pid - void $ tryIO $ signalProcessGroup sigTERM g - threadDelay 50000 -- 0.05 second grace period - void $ tryIO $ signalProcessGroup sigKILL g + unless pause $ + {- remove queued transfer -} + void $ liftAssistant $ dequeueTransfers $ equivilantTransfer t + {- stop running transfer -} + maybe noop stop (M.lookup t m) + where + stop info = liftAssistant $ do + {- When there's a thread associated with the + - transfer, it's signaled first, to avoid it + - displaying any alert about the transfer having + - failed when the transfer process is killed. -} + liftIO $ maybe noop signalthread $ transferTid info + liftIO $ maybe noop killproc $ transferPid info + if pause + then void $ alterTransferInfo t $ + \i -> i { transferPaused = True } + else void $ removeTransfer t + signalthread tid + | pause = throwTo tid PauseTransfer + | otherwise = killThread tid + {- In order to stop helper processes like rsync, + - kill the whole process group of the process running the transfer. -} + killproc pid = do + g <- getProcessGroupIDOf pid + void $ tryIO $ signalProcessGroup sigTERM g + threadDelay 50000 -- 0.05 second grace period + void $ tryIO $ signalProcessGroup sigKILL g startTransfer :: Transfer -> Handler () startTransfer t = do m <- getCurrentTransfers maybe startqueued go (M.lookup t m) - where - go info = maybe (start info) resume $ transferTid info - startqueued = do - webapp <- getYesod - let dstatus = daemonStatus webapp - let q = transferQueue webapp - is <- liftIO $ map snd <$> getMatchingTransfers q dstatus (== t) - maybe noop start $ headMaybe is - resume tid = do - webapp <- getYesod - let dstatus = daemonStatus webapp - liftIO $ do - alterTransferInfo dstatus t $ \i -> i - { transferPaused = False } - throwTo tid ResumeTransfer - start info = do - webapp <- getYesod - let st = fromJust $ threadState webapp - let dstatus = daemonStatus webapp - let slots = transferSlots webapp - liftIO $ inImmediateTransferSlot dstatus slots $ do - program <- readProgramFile - Transferrer.startTransfer st dstatus program t info + where + go info = maybe (start info) resume $ transferTid info + startqueued = do + is <- liftAssistant $ map snd <$> getMatchingTransfers (== t) + maybe noop start $ headMaybe is + resume tid = do + liftAssistant $ alterTransferInfo t $ + \i -> i { transferPaused = False } + liftIO $ throwTo tid ResumeTransfer + start info = liftAssistant $ do + program <- liftIO readProgramFile + inImmediateTransferSlot $ + Transferrer.startTransfer program t info getCurrentTransfers :: Handler TransferMap -getCurrentTransfers = currentTransfers - <$> (liftIO . 
getDaemonStatus =<< daemonStatus <$> getYesod) +getCurrentTransfers = currentTransfers <$> liftAssistant getDaemonStatus diff --git a/Assistant/WebApp/routes b/Assistant/WebApp/routes index 0991f22..2d64672 100644 --- a/Assistant/WebApp/routes +++ b/Assistant/WebApp/routes @@ -6,12 +6,15 @@ /config ConfigR GET /config/repository RepositoriesR GET +/config/xmpp XMPPR GET +/config/xmpp/for/pairing XMPPForPairingR GET /config/repository/new/first FirstRepositoryR GET /config/repository/new NewRepositoryR GET /config/repository/switchto/#FilePath SwitchToRepositoryR GET /config/repository/edit/#UUID EditRepositoryR GET /config/repository/edit/new/#UUID EditNewRepositoryR GET +/config/repository/edit/new/cloud/#UUID EditNewCloudRepositoryR GET /config/repository/sync/disable/#UUID DisableSyncR GET /config/repository/sync/enable/#UUID EnableSyncR GET @@ -23,18 +26,27 @@ /config/repository/add/cloud/rsync.net AddRsyncNetR GET /config/repository/add/cloud/S3 AddS3R GET -/config/repository/pair/start StartPairR GET -/config/repository/pair/inprogress/#SecretReminder InprogressPairR GET -/config/repository/pair/finish/#PairMsg FinishPairR GET +/config/repository/pair/local/start StartLocalPairR GET +/config/repository/pair/local/running/#SecretReminder RunningLocalPairR GET +/config/repository/pair/local/finish/#PairMsg FinishLocalPairR GET +/config/repository/pair/xmpp/start StartXMPPPairR GET +/config/repository/pair/xmpp/running/#BuddyKey RunningXMPPPairR GET +/config/repository/pair/xmpp/accept/#PairKey ConfirmXMPPPairR GET +/config/repository/pair/xmpp/finish/#PairKey FinishXMPPPairR GET /config/repository/enable/rsync/#UUID EnableRsyncR GET /config/repository/enable/directory/#UUID EnableDirectoryR GET /config/repository/enable/S3/#UUID EnableS3R GET /transfers/#NotificationId TransfersR GET -/sidebar/#NotificationId SideBarR GET /notifier/transfers NotifierTransfersR GET + +/sidebar/#NotificationId SideBarR GET /notifier/sidebar NotifierSideBarR GET + +/buddylist/#NotificationId BuddyListR GET +/notifier/buddylist NotifierBuddyListR GET + /alert/close/#AlertId CloseAlert GET /alert/click/#AlertId ClickAlert GET /filebrowser FileBrowserR GET POST diff --git a/Assistant/XMPP.hs b/Assistant/XMPP.hs new file mode 100644 index 0000000..2c00044 --- /dev/null +++ b/Assistant/XMPP.hs @@ -0,0 +1,241 @@ +{- core xmpp support + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +{-# LANGUAGE OverloadedStrings #-} + +module Assistant.XMPP where + +import Assistant.Common +import Assistant.Types.NetMessager +import Assistant.Pairing + +import Network.Protocol.XMPP hiding (Node) +import Data.Text (Text) +import qualified Data.Text as T +import qualified Data.Map as M +import Data.ByteString (ByteString) +import qualified Data.ByteString as B +import Data.XML.Types +import qualified Codec.Binary.Base64 as B64 + +{- Name of the git-annex tag, in our own XML namespace. + - (Not using a namespace URL to avoid unnecessary bloat.) -} +gitAnnexTagName :: Name +gitAnnexTagName = "{git-annex}git-annex" + +{- Creates a git-annex tag containing a particular attribute and value. -} +gitAnnexTag :: Name -> Text -> Element +gitAnnexTag attr val = gitAnnexTagContent attr val [] + +{- Also with some content. 
-} +gitAnnexTagContent :: Name -> Text -> [Node] -> Element +gitAnnexTagContent attr val = Element gitAnnexTagName [(attr, [ContentText val])] + +isGitAnnexTag :: Element -> Bool +isGitAnnexTag t = elementName t == gitAnnexTagName + +{- Things that a git-annex tag can inserted into. -} +class GitAnnexTaggable a where + insertGitAnnexTag :: a -> Element -> a + + extractGitAnnexTag :: a -> Maybe Element + + hasGitAnnexTag :: a -> Bool + hasGitAnnexTag = isJust . extractGitAnnexTag + +instance GitAnnexTaggable Message where + insertGitAnnexTag m elt = m { messagePayloads = elt : messagePayloads m } + extractGitAnnexTag = headMaybe . filter isGitAnnexTag . messagePayloads + +instance GitAnnexTaggable Presence where + -- always mark extended away and set presence priority to negative + insertGitAnnexTag p elt = p + { presencePayloads = extendedAway : negativePriority : elt : presencePayloads p } + extractGitAnnexTag = headMaybe . filter isGitAnnexTag . presencePayloads + +data GitAnnexTagInfo = GitAnnexTagInfo + { tagAttr :: Name + , tagValue :: Text + , tagElement :: Element + } + +type Decoder = Message -> GitAnnexTagInfo -> Maybe NetMessage + +gitAnnexTagInfo :: GitAnnexTaggable a => a -> Maybe GitAnnexTagInfo +gitAnnexTagInfo v = case extractGitAnnexTag v of + {- Each git-annex tag has a single attribute. -} + Just (tag@(Element _ [(attr, _)] _)) -> GitAnnexTagInfo + <$> pure attr + <*> attributeText attr tag + <*> pure tag + _ -> Nothing + +{- A presence with a git-annex tag in it. -} +gitAnnexPresence :: Element -> Presence +gitAnnexPresence = insertGitAnnexTag $ emptyPresence PresenceAvailable + +{- A presence with an empty git-annex tag in it, used for letting other + - clients know we're around and are a git-annex client. -} +gitAnnexSignature :: Presence +gitAnnexSignature = gitAnnexPresence $ Element gitAnnexTagName [] [] + +{- A message with a git-annex tag in it. -} +gitAnnexMessage :: Element -> JID -> JID -> Message +gitAnnexMessage elt tojid fromjid = (insertGitAnnexTag silentMessage elt) + { messageTo = Just tojid + , messageFrom = Just fromjid + } + +{- A notification that we've pushed to some repositories, listing their + - UUIDs. -} +pushNotification :: [UUID] -> Presence +pushNotification = gitAnnexPresence . gitAnnexTag pushAttr . encodePushNotification + +encodePushNotification :: [UUID] -> Text +encodePushNotification = T.intercalate uuidSep . map (T.pack . fromUUID) + +decodePushNotification :: Text -> [UUID] +decodePushNotification = map (toUUID . T.unpack) . T.splitOn uuidSep + +uuidSep :: Text +uuidSep = "," + +{- A request for other git-annex clients to send presence. -} +presenceQuery :: Presence +presenceQuery = gitAnnexPresence $ gitAnnexTag queryAttr T.empty + +{- A notification about a stage of pairing. -} +pairingNotification :: PairStage -> UUID -> JID -> JID -> Message +pairingNotification pairstage u = gitAnnexMessage $ + gitAnnexTag pairAttr $ encodePairingNotification pairstage u + +encodePairingNotification :: PairStage -> UUID -> Text +encodePairingNotification pairstage u = T.unwords $ map T.pack + [ show pairstage + , fromUUID u + ] + +decodePairingNotification :: Decoder +decodePairingNotification m = parse . words . T.unpack . tagValue + where + parse [stage, u] = PairingNotification + <$> readish stage + <*> (formatJID <$> messageFrom m) + <*> pure (toUUID u) + parse _ = Nothing + +pushMessage :: PushStage -> JID -> JID -> Message +pushMessage = gitAnnexMessage . 
encode + where + encode CanPush = gitAnnexTag canPushAttr T.empty + encode PushRequest = gitAnnexTag pushRequestAttr T.empty + encode StartingPush = gitAnnexTag startingPushAttr T.empty + encode (ReceivePackOutput b) = + gitAnnexTagContent receivePackAttr T.empty $ encodeTagContent b + encode (SendPackOutput b) = + gitAnnexTagContent sendPackAttr T.empty $ encodeTagContent b + encode (ReceivePackDone code) = + gitAnnexTag receivePackDoneAttr $ + T.pack $ show $ encodeExitCode code + +decodeMessage :: Message -> Maybe NetMessage +decodeMessage m = decode =<< gitAnnexTagInfo m + where + decode i = M.lookup (tagAttr i) decoders >>= rundecoder i + rundecoder i d = d m i + decoders = M.fromList $ zip + [ pairAttr + , canPushAttr + , pushRequestAttr + , startingPushAttr + , receivePackAttr + , sendPackAttr + , receivePackDoneAttr + ] + [ decodePairingNotification + , pushdecoder $ const $ Just CanPush + , pushdecoder $ const $ Just PushRequest + , pushdecoder $ const $ Just StartingPush + , pushdecoder $ + fmap ReceivePackOutput . decodeTagContent . tagElement + , pushdecoder $ + fmap SendPackOutput . decodeTagContent . tagElement + , pushdecoder $ + fmap (ReceivePackDone . decodeExitCode) . readish . + T.unpack . tagValue + ] + pushdecoder a m' i = Pushing + <$> (formatJID <$> messageFrom m') + <*> a i + +decodeExitCode :: Int -> ExitCode +decodeExitCode 0 = ExitSuccess +decodeExitCode n = ExitFailure n + +encodeExitCode :: ExitCode -> Int +encodeExitCode ExitSuccess = 0 +encodeExitCode (ExitFailure n) = n + +{- Base 64 encoding a ByteString to use as the content of a tag. -} +encodeTagContent :: ByteString -> [Node] +encodeTagContent b = [NodeContent $ ContentText $ T.pack $ B64.encode $ B.unpack b] + +decodeTagContent :: Element -> Maybe ByteString +decodeTagContent elt = B.pack <$> B64.decode s + where + s = T.unpack $ T.concat $ elementText elt + +{- The JID without the client part. -} +baseJID :: JID -> JID +baseJID j = JID (jidNode j) (jidDomain j) Nothing + +{- An XMPP chat message with an empty body. This should not be displayed + - by clients, but can be used for communications. -} +silentMessage :: Message +silentMessage = (emptyMessage MessageChat) + { messagePayloads = [ emptybody ] } + where + emptybody = Element + { elementName = "body" + , elementAttributes = [] + , elementNodes = [] + } + +{- Add to a presence to mark its client as extended away. -} +extendedAway :: Element +extendedAway = Element "show" [] [NodeContent $ ContentText "xa"] + +{- Add to a presence to give it a negative priority. -} +negativePriority :: Element +negativePriority = Element "priority" [] [NodeContent $ ContentText "-1"] + +pushAttr :: Name +pushAttr = "push" + +queryAttr :: Name +queryAttr = "query" + +pairAttr :: Name +pairAttr = "pair" + +canPushAttr :: Name +canPushAttr = "canpush" + +pushRequestAttr :: Name +pushRequestAttr = "pushrequest" + +startingPushAttr :: Name +startingPushAttr = "startingpush" + +receivePackAttr :: Name +receivePackAttr = "rp" + +sendPackAttr :: Name +sendPackAttr = "sp" + +receivePackDoneAttr :: Name +receivePackDoneAttr = "rpdone" diff --git a/Assistant/XMPP/Buddies.hs b/Assistant/XMPP/Buddies.hs new file mode 100644 index 0000000..7383c38 --- /dev/null +++ b/Assistant/XMPP/Buddies.hs @@ -0,0 +1,83 @@ +{- xmpp buddies + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. 
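
The text payloads defined above are deliberately simple; an illustrative round trip using hypothetical UUIDs uuid-a and uuid-b:

    -- encodePushNotification [toUUID "uuid-a", toUUID "uuid-b"]  yields "uuid-a,uuid-b"
    -- encodePairingNotification PairReq (toUUID "uuid-a")        yields "PairReq uuid-a"
    -- decodePairingNotification reverses this, taking the sender's JID from the
    -- message's from attribute; git pack output travels base64-encoded inside the
    -- tag via encodeTagContent and decodeTagContent.
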
+ -} + +module Assistant.XMPP.Buddies where + +import Assistant.XMPP +import Common.Annex +import Assistant.Types.Buddies + +import Network.Protocol.XMPP +import qualified Data.Map as M +import qualified Data.Set as S +import Data.Text (Text) +import qualified Data.Text as T + +genBuddyKey :: JID -> BuddyKey +genBuddyKey j = BuddyKey $ formatJID $ baseJID j + +buddyName :: JID -> Text +buddyName j = maybe (T.pack "") strNode (jidNode j) + +{- Summary of info about a buddy. + - + - If the buddy has no clients at all anymore, returns Nothing. -} +buddySummary :: [JID] -> Buddy -> Maybe (Text, Bool, Bool, Bool, BuddyKey) +buddySummary pairedwith b = case clients of + ((Client j):_) -> Just (buddyName j, away, canpair, alreadypaired j, genBuddyKey j) + [] -> Nothing + where + away = S.null (buddyPresent b) && S.null (buddyAssistants b) + canpair = not $ S.null (buddyAssistants b) + clients = S.toList $ buddyPresent b `S.union` buddyAway b `S.union` buddyAssistants b + alreadypaired j = baseJID j `elem` pairedwith + +{- Updates the buddies with XMPP presence info. -} +updateBuddies :: Presence -> Buddies -> Buddies +updateBuddies p@(Presence { presenceFrom = Just jid }) = M.alter update key + where + key = genBuddyKey jid + update (Just b) = Just $ applyPresence p b + update Nothing = newBuddy p +updateBuddies _ = id + +{- Creates a new buddy based on XMPP presence info. -} +newBuddy :: Presence -> Maybe Buddy +newBuddy p + | presenceType p == PresenceAvailable = go + | presenceType p == PresenceUnavailable = go + | otherwise = Nothing + where + go = make <$> presenceFrom p + make _jid = applyPresence p $ Buddy + { buddyPresent = S.empty + , buddyAway = S.empty + , buddyAssistants = S.empty + , buddyPairing = False + } + +applyPresence :: Presence -> Buddy -> Buddy +applyPresence p b = fromMaybe b $! go <$> presenceFrom p + where + go jid + | presenceType p == PresenceUnavailable = b + { buddyAway = addto $ buddyAway b + , buddyPresent = removefrom $ buddyPresent b + , buddyAssistants = removefrom $ buddyAssistants b + } + | hasGitAnnexTag p = b + { buddyAssistants = addto $ buddyAssistants b + , buddyAway = removefrom $ buddyAway b } + | presenceType p == PresenceAvailable = b + { buddyPresent = addto $ buddyPresent b + , buddyAway = removefrom $ buddyAway b + } + | otherwise = b + where + client = Client jid + removefrom = S.filter (/= client) + addto = S.insert client diff --git a/Assistant/XMPP/Client.hs b/Assistant/XMPP/Client.hs new file mode 100644 index 0000000..8ab0c28 --- /dev/null +++ b/Assistant/XMPP/Client.hs @@ -0,0 +1,85 @@ +{- xmpp client support + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.XMPP.Client where + +import Assistant.Common +import Utility.FileMode +import Utility.SRV + +import Network.Protocol.XMPP +import Network +import Control.Concurrent +import qualified Data.Text as T +import Control.Exception (SomeException) + +{- Everything we need to know to connect to an XMPP server. -} +data XMPPCreds = XMPPCreds + { xmppUsername :: T.Text + , xmppPassword :: T.Text + , xmppHostname :: HostName + , xmppPort :: Int + , xmppJID :: T.Text + } + deriving (Read, Show) + +connectXMPP :: XMPPCreds -> (JID -> XMPP a) -> IO (Either SomeException ()) +connectXMPP c a = case parseJID (xmppJID c) of + Nothing -> error "bad JID" + Just jid -> connectXMPP' jid c a + +{- Do a SRV lookup, but if it fails, fall back to the cached xmppHostname. 
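+ -
+ - For a JID like user@example.com, the lookup below is expected to query
+ - the standard client SRV name (a sketch of what mkSRVTcp presumably
+ - constructs):
+ -
+ -   lookupSRV (mkSRVTcp "xmpp-client" "example.com")
+ -   -- i.e. roughly the DNS name _xmpp-client._tcp.example.com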
-} +connectXMPP' :: JID -> XMPPCreds -> (JID -> XMPP a) -> IO (Either SomeException ()) +connectXMPP' jid c a = go =<< lookupSRV srvrecord + where + srvrecord = mkSRVTcp "xmpp-client" $ + T.unpack $ strDomain $ jidDomain jid + serverjid = JID Nothing (jidDomain jid) Nothing + + go [] = run (xmppHostname c) + (PortNumber $ fromIntegral $ xmppPort c) + (a jid) + go ((h,p):rest) = do + {- Try each SRV record in turn, until one connects, + - at which point the MVar will be full. -} + mv <- newEmptyMVar + r <- run h p $ do + liftIO $ putMVar mv () + a jid + ifM (isEmptyMVar mv) (go rest, return r) + + {- Async exceptions are let through so the XMPP thread can + - be killed. -} + run h p a' = tryNonAsync $ + runClientError (Server serverjid h p) jid + (xmppUsername c) (xmppPassword c) (void a') + +{- XMPP runClient, that throws errors rather than returning an Either -} +runClientError :: Server -> JID -> T.Text -> T.Text -> XMPP a -> IO a +runClientError s j u p x = either (error . show) return =<< runClient s j u p x + +getXMPPCreds :: Annex (Maybe XMPPCreds) +getXMPPCreds = do + f <- xmppCredsFile + s <- liftIO $ catchMaybeIO $ readFile f + return $ readish =<< s + +setXMPPCreds :: XMPPCreds -> Annex () +setXMPPCreds creds = do + f <- xmppCredsFile + liftIO $ do + createDirectoryIfMissing True (parentDir f) + h <- openFile f WriteMode + modifyFileMode f $ removeModes + [groupReadMode, otherReadMode] + hPutStr h (show creds) + hClose h + +xmppCredsFile :: Annex FilePath +xmppCredsFile = do + dir <- fromRepo gitAnnexCredsDir + return $ dir "xmpp" diff --git a/Assistant/XMPP/Git.hs b/Assistant/XMPP/Git.hs new file mode 100644 index 0000000..da143ea --- /dev/null +++ b/Assistant/XMPP/Git.hs @@ -0,0 +1,295 @@ +{- git over XMPP + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Assistant.XMPP.Git where + +import Assistant.Common +import Assistant.NetMessager +import Assistant.Types.NetMessager +import Assistant.XMPP +import Assistant.XMPP.Buddies +import Assistant.DaemonStatus +import Assistant.Alert +import Assistant.MakeRemote +import Assistant.Sync +import qualified Command.Sync +import qualified Annex.Branch +import Annex.UUID +import Config +import Git +import qualified Git.Branch +import Locations.UserConfig +import qualified Types.Remote as Remote +import Utility.FileMode +import Utility.ThreadScheduler + +import Network.Protocol.XMPP +import qualified Data.Text as T +import System.Posix.Env +import System.Posix.Types +import System.Process (std_in, std_out, std_err) +import Control.Concurrent +import qualified Data.ByteString as B +import qualified Data.Map as M + +finishXMPPPairing :: JID -> UUID -> Assistant () +finishXMPPPairing jid u = void $ alertWhile alert $ + makeXMPPGitRemote buddy (baseJID jid) u + where + buddy = T.unpack $ buddyName jid + alert = pairRequestAcknowledgedAlert buddy Nothing + +gitXMPPLocation :: JID -> String +gitXMPPLocation jid = "xmpp::" ++ T.unpack (formatJID $ baseJID jid) + +makeXMPPGitRemote :: String -> JID -> UUID -> Assistant Bool +makeXMPPGitRemote buddyname jid u = do + remote <- liftAnnex $ addRemote $ + makeGitRemote buddyname $ gitXMPPLocation jid + liftAnnex $ storeUUID (remoteConfig (Remote.repo remote) "uuid") u + syncNewRemote remote + return True + +{- Pushes over XMPP, communicating with a specific client. + - Runs an arbitrary IO action to push, which should run git-push with + - an xmpp:: url. 
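+ -
+ - A typical call is sketched below; it mirrors handlePushInitiation
+ - further down in this module, where pushFallback supplies the actual
+ - git push action:
+ -
+ -   ok <- xmppPush cid (pushFallback u branch remote)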
+ -
+ - To handle xmpp:: urls, git push will run git-remote-xmpp, which is
+ - injected into its PATH, and in turn runs git-annex xmppgit. The
+ - dataflow then becomes:
+ -
+ - git push <--> git-annex xmppgit <--> xmppPush <-------> xmpp
+ -                                                          |
+ - git receive-pack <--> xmppReceivePack <---------------> xmpp
+ -
+ - The pipe between git-annex xmppgit and us is set up and communicated
+ - using two environment variables, relayIn and relayOut, that are set
+ - to the file descriptors to use. Another, relayControl, is used to
+ - propagate the exit status of git receive-pack.
+ -
+ - We listen at the other end of the pipe and relay to and from XMPP.
+ -}
+xmppPush :: ClientID -> (Git.Repo -> IO Bool) -> Assistant Bool
+xmppPush cid gitpush = runPush SendPack cid handleDeferred $ do
+	sendNetMessage $ Pushing cid StartingPush
+
+	(Fd inf, writepush) <- liftIO createPipe
+	(readpush, Fd outf) <- liftIO createPipe
+	(Fd controlf, writecontrol) <- liftIO createPipe
+
+	tmp <- liftAnnex $ fromRepo gitAnnexTmpDir
+	let tmpdir = tmp </> "xmppgit"
+	installwrapper tmpdir
+
+	env <- liftIO getEnvironment
+	path <- liftIO getSearchPath
+	let myenv = M.fromList
+		[ ("PATH", join [searchPathSeparator] $ tmpdir:path)
+		, (relayIn, show inf)
+		, (relayOut, show outf)
+		, (relayControl, show controlf)
+		]
+		`M.union` M.fromList env
+
+	inh <- liftIO $ fdToHandle readpush
+	outh <- liftIO $ fdToHandle writepush
+	controlh <- liftIO $ fdToHandle writecontrol
+
+	t1 <- forkIO <~> toxmpp inh
+	t2 <- forkIO <~> fromxmpp outh controlh
+
+	{- This can take a long time to run, so avoid running it in the
+	 - Annex monad. Also, override environment. -}
+	g <- liftAnnex gitRepo
+	r <- liftIO $ gitpush $ g { gitEnv = Just $ M.toList myenv }
+
+	liftIO $ do
+		mapM_ killThread [t1, t2]
+		mapM_ hClose [inh, outh, controlh]
+
+	return r
+  where
+	toxmpp inh = forever $ do
+		b <- liftIO $ B.hGetSome inh chunkSize
+		if B.null b
+			then liftIO $ killThread =<< myThreadId
+			else sendNetMessage $ Pushing cid $ SendPackOutput b
+	fromxmpp outh controlh = forever $ do
+		m <- runTimeout xmppTimeout <~> waitNetPushMessage SendPack
+		case m of
+			(Right (Pushing _ (ReceivePackOutput b))) ->
+				liftIO $ writeChunk outh b
+			(Right (Pushing _ (ReceivePackDone exitcode))) ->
+				liftIO $ do
+					hPrint controlh exitcode
+					hFlush controlh
+			(Right _) -> noop
+			(Left _) -> do
+				debug ["timeout waiting for git receive-pack output via XMPP"]
+				-- Send a synthetic exit code to git-annex
+				-- xmppgit, which will exit and cause git push
+				-- to die.
+				liftIO $ do
+					hPrint controlh (ExitFailure 1)
+					hFlush controlh
+	installwrapper tmpdir = liftIO $ do
+		createDirectoryIfMissing True tmpdir
+		let wrapper = tmpdir </> "git-remote-xmpp"
+		program <- readProgramFile
+		writeFile wrapper $ unlines
+			[ "#!/bin/sh"
+			, "exec " ++ program ++ " xmppgit"
+			]
+		modifyFileMode wrapper $ addModes executeModes
+
+type EnvVar = String
+
+envVar :: String -> EnvVar
+envVar s = "GIT_ANNEX_XMPPGIT_" ++ s
+
+relayIn :: EnvVar
+relayIn = envVar "IN"
+
+relayOut :: EnvVar
+relayOut = envVar "OUT"
+
+relayControl :: EnvVar
+relayControl = envVar "CONTROL"
+
+relayHandle :: EnvVar -> IO Handle
+relayHandle var = do
+	v <- getEnv var
+	case readish =<< v of
+		Nothing -> error $ var ++ " not set"
+		Just n -> fdToHandle $ Fd n
+
+{- Called by git-annex xmppgit.
+ -
+ - git-push is talking to us on stdin
+ - we're talking to git-push on stdout
+ - git-receive-pack is talking to us on relayIn (via XMPP)
+ - we're talking to git-receive-pack on relayOut (via XMPP)
+ - git-receive-pack's exit code will be passed to us on relayControl
+ -}
+xmppGitRelay :: IO ()
+xmppGitRelay = do
+	flip relay stdout =<< relayHandle relayIn
+	relay stdin =<< relayHandle relayOut
+	code <- hGetLine =<< relayHandle relayControl
+	exitWith $ fromMaybe (ExitFailure 1) $ readish code
+  where
+	{- Is it possible to set up pipes and not need to copy the data
+	 - ourselves? See splice(2) -}
+	relay fromh toh = void $ forkIO $ forever $ do
+		b <- B.hGetSome fromh chunkSize
+		when (B.null b) $ do
+			hClose fromh
+			hClose toh
+			killThread =<< myThreadId
+		writeChunk toh b
+
+{- Relays git receive-pack stdin and stdout via XMPP, as well as propagating
+ - its exit status to XMPP. -}
+xmppReceivePack :: ClientID -> Assistant Bool
+xmppReceivePack cid = runPush ReceivePack cid handleDeferred $ do
+	repodir <- liftAnnex $ fromRepo repoPath
+	let p = (proc "git" ["receive-pack", repodir])
+		{ std_in = CreatePipe
+		, std_out = CreatePipe
+		, std_err = Inherit
+		}
+	(Just inh, Just outh, _, pid) <- liftIO $ createProcess p
+	readertid <- forkIO <~> relayfromxmpp inh
+	relaytoxmpp outh
+	code <- liftIO $ waitForProcess pid
+	void $ sendNetMessage $ Pushing cid $ ReceivePackDone code
+	liftIO $ do
+		killThread readertid
+		hClose inh
+		hClose outh
+	return $ code == ExitSuccess
+  where
+	relaytoxmpp outh = do
+		b <- liftIO $ B.hGetSome outh chunkSize
+		-- empty is EOF, so exit
+		unless (B.null b) $ do
+			sendNetMessage $ Pushing cid $ ReceivePackOutput b
+			relaytoxmpp outh
+	relayfromxmpp inh = forever $ do
+		m <- runTimeout xmppTimeout <~> waitNetPushMessage ReceivePack
+		case m of
+			(Right (Pushing _ (SendPackOutput b))) ->
+				liftIO $ writeChunk inh b
+			(Right _) -> noop
+			(Left _) -> do
+				debug ["timeout waiting for git send-pack output via XMPP"]
+				-- closing the handle will make
+				-- git receive-pack exit
+				liftIO $ do
+					hClose inh
+					killThread =<< myThreadId
+
+xmppRemotes :: ClientID -> Assistant [Remote]
+xmppRemotes cid = case baseJID <$> parseJID cid of
+	Nothing -> return []
+	Just jid -> do
+		let loc = gitXMPPLocation jid
+		filter (matching loc . Remote.repo) . syncGitRemotes
+			<$> getDaemonStatus
+  where
+	matching loc r = repoIsUrl r && repoLocation r == loc
+
+whenXMPPRemote :: ClientID -> Assistant () -> Assistant ()
+whenXMPPRemote cid = unlessM (null <$> xmppRemotes cid)
+
+handlePushInitiation :: NetMessage -> Assistant ()
+handlePushInitiation (Pushing cid CanPush) =
+	whenXMPPRemote cid $
+		sendNetMessage $ Pushing cid PushRequest
+
+handlePushInitiation (Pushing cid PushRequest) =
+	go =<< liftAnnex (inRepo Git.Branch.current)
+  where
+	go Nothing = noop
+	go (Just branch) = do
+		rs <- xmppRemotes cid
+		liftAnnex $ Annex.Branch.commit "update"
+		(g, u) <- liftAnnex $ (,)
+			<$> gitRepo
+			<*> getUUID
+		liftIO $ Command.Sync.updateBranch (Command.Sync.syncBranch branch) g
+		debug ["pushing to", show rs]
+		forM_ rs $ \r -> xmppPush cid $ pushFallback u branch r
+
+handlePushInitiation (Pushing cid StartingPush) =
+	whenXMPPRemote cid $
+		void $ xmppReceivePack cid
+handlePushInitiation _ = noop
+
+handleDeferred :: NetMessage -> Assistant ()
+handleDeferred = handlePushInitiation
+
+writeChunk :: Handle -> B.ByteString -> IO ()
+writeChunk h b = do
+	B.hPut h b
+	hFlush h
+
+{- Largest chunk of data to send in a single XMPP message.
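+ -
+ - Note that each chunk is base64 encoded by encodeTagContent before it
+ - is sent, so a full chunk works out to roughly a third more than this
+ - (around 5.5 kB of tag content) on the wire.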
+ -}
+chunkSize :: Int
+chunkSize = 4096
+
+{- How long to wait for an expected message before assuming the other side
+ - has gone away and canceling a push.
+ -
+ - This needs to be long enough to allow a message of up to 2+ times
+ - chunkSize to propagate up to an XMPP server, perhaps across to another
+ - server, and back down to us. On the other hand, other XMPP pushes can be
+ - delayed from running until the timeout is reached, so it should not be
+ - excessive.
+ -}
+xmppTimeout :: Seconds
+xmppTimeout = Seconds 120
diff --git a/Backend.hs b/Backend.hs
index d1dfdef..b66e613 100644
--- a/Backend.hs
+++ b/Backend.hs
@@ -40,16 +40,16 @@ orderedList = do
 	if not $ null l
 		then return l
 		else handle =<< Annex.getState Annex.forcebackend
-	where
-		handle Nothing = standard
-		handle (Just "") = standard
-		handle (Just name) = do
-			l' <- (lookupBackendName name :) <$> standard
-			Annex.changeState $ \s -> s { Annex.backends = l' }
-			return l'
-		standard = parseBackendList <$> getConfig (annexConfig "backends") ""
-		parseBackendList [] = list
-		parseBackendList s = map lookupBackendName $ words s
+  where
+	handle Nothing = standard
+	handle (Just "") = standard
+	handle (Just name) = do
+		l' <- (lookupBackendName name :) <$> standard
+		Annex.changeState $ \s -> s { Annex.backends = l' }
+		return l'
+	standard = parseBackendList <$> getConfig (annexConfig "backends") ""
+	parseBackendList [] = list
+	parseBackendList s = map lookupBackendName $ words s
 
 {- Generates a key for a file, trying each backend in turn until one
  - accepts it.
@@ -66,12 +66,12 @@ genKey' (b:bs) source = do
 	case r of
 		Nothing -> genKey' bs source
 		Just k -> return $ Just (makesane k, b)
-	where
-		-- keyNames should not contain newline characters.
-		makesane k = k { keyName = map fixbadchar (keyName k) }
-		fixbadchar c
-			| c == '\n' = '_'
-			| otherwise = c
+  where
+	-- keyNames should not contain newline characters.
+	makesane k = k { keyName = map fixbadchar (keyName k) }
+	fixbadchar c
+		| c == '\n' = '_'
+		| otherwise = c
 
 {- Looks up the key and backend corresponding to an annexed file,
  - by examining what the file symlinks to. -}
@@ -81,35 +81,33 @@ lookupFile file = do
 	case tl of
 		Left _ -> return Nothing
 		Right l -> makekey l
-	where
-		makekey l = maybe (return Nothing) (makeret l) (fileKey $ takeFileName l)
-		makeret l k = let bname = keyBackendName k in
-			case maybeLookupBackendName bname of
-				Just backend -> do
-					return $ Just (k, backend)
-				Nothing -> do
-					when (isLinkToAnnex l) $ warning $
-						"skipping " ++ file ++
-						" (unknown backend " ++
-						bname ++ ")"
-					return Nothing
+  where
+	makekey l = maybe (return Nothing) (makeret l) (fileKey $ takeFileName l)
+	makeret l k = let bname = keyBackendName k in
+		case maybeLookupBackendName bname of
+			Just backend -> do
+				return $ Just (k, backend)
+			Nothing -> do
+				when (isLinkToAnnex l) $ warning $
+					"skipping " ++ file ++
+					" (unknown backend " ++ bname ++ ")"
+				return Nothing
 
 {- Looks up the backend that should be used for a file.
  - That can be configured on a per-file basis in the gitattributes file. -}
 chooseBackend :: FilePath -> Annex (Maybe Backend)
 chooseBackend f = Annex.getState Annex.forcebackend >>= go
-	where
-		go Nothing = maybeLookupBackendName <$>
-			checkAttr "annex.backend" f
-		go (Just _) = Just . Prelude.head <$> orderedList
+  where
+	go Nothing = maybeLookupBackendName <$> checkAttr "annex.backend" f
+	go (Just _) = Just . Prelude.head <$> orderedList
 
 {- Looks up a backend by name. May fail if unknown.
-} lookupBackendName :: String -> Backend lookupBackendName s = fromMaybe unknown $ maybeLookupBackendName s - where - unknown = error $ "unknown backend " ++ s + where + unknown = error $ "unknown backend " ++ s maybeLookupBackendName :: String -> Maybe Backend maybeLookupBackendName s = headMaybe matches - where - matches = filter (\b -> s == B.name b) list + where + matches = filter (\b -> s == B.name b) list diff --git a/Backend/SHA.hs b/Backend/SHA.hs index bfb94df..ef0e92d 100644 --- a/Backend/SHA.hs +++ b/Backend/SHA.hs @@ -57,24 +57,23 @@ shaN shasize file filesize = do Left sha -> liftIO $ sha <$> L.readFile file Right command -> liftIO $ parse command . lines <$> readsha command (toCommand [File file]) - where - parse command [] = bad command - parse command (l:_) - | null sha = bad command - | otherwise = sha - where - sha = fst $ separate (== ' ') l - bad command = error $ command ++ " parse error" - {- sha commands output the filename, so need to set fileEncoding -} - readsha command args = - withHandle StdoutHandle createProcessSuccess p $ \h -> do - fileEncoding h - output <- hGetContentsStrict h - hClose h - return output - where - p = (proc command args) - { std_out = CreatePipe } + where + parse command [] = bad command + parse command (l:_) + | null sha = bad command + | otherwise = sha + where + sha = fst $ separate (== ' ') l + bad command = error $ command ++ " parse error" + {- sha commands output the filename, so need to set fileEncoding -} + readsha command args = + withHandle StdoutHandle createProcessSuccess p $ \h -> do + fileEncoding h + output <- hGetContentsStrict h + hClose h + return output + where + p = (proc command args) { std_out = CreatePipe } shaCommand :: SHASize -> Integer -> Either (L.ByteString -> String) String shaCommand shasize filesize @@ -84,14 +83,14 @@ shaCommand shasize filesize | shasize == 384 = use SysConfig.sha384 sha384 | shasize == 512 = use SysConfig.sha512 sha512 | otherwise = error $ "bad sha size " ++ show shasize - where - use Nothing sha = Left $ showDigest . sha - use (Just c) sha - -- use builtin, but slower sha for small files - -- benchmarking indicates it's faster up to - -- and slightly beyond 50 kb files - | filesize < 51200 = use Nothing sha - | otherwise = Right c + where + use Nothing sha = Left $ showDigest . sha + use (Just c) sha + {- use builtin, but slower sha for small files + - benchmarking indicates it's faster up to + - and slightly beyond 50 kb files -} + | filesize < 51200 = use Nothing sha + | otherwise = Right c {- A key is a checksum of its contents. -} keyValue :: SHASize -> KeySource -> Annex (Maybe Key) @@ -109,23 +108,23 @@ keyValue shasize source = do {- Extension preserving keys. -} keyValueE :: SHASize -> KeySource -> Annex (Maybe Key) keyValueE size source = keyValue size source >>= maybe (return Nothing) addE - where - addE k = return $ Just $ k - { keyName = keyName k ++ selectExtension (keyFilename source) - , keyBackendName = shaNameE size - } + where + addE k = return $ Just $ k + { keyName = keyName k ++ selectExtension (keyFilename source) + , keyBackendName = shaNameE size + } selectExtension :: FilePath -> String selectExtension f | null es = "" | otherwise = join "." ("":es) - where - es = filter (not . null) $ reverse $ - take 2 $ takeWhile shortenough $ - reverse $ split "." $ takeExtensions f - shortenough e - | '\n' `elem` e = False -- newline in extension?! - | otherwise = length e <= 4 -- long enough for "jpeg" + where + es = filter (not . 
null) $ reverse $ + take 2 $ takeWhile shortenough $ + reverse $ split "." $ takeExtensions f + shortenough e + | '\n' `elem` e = False -- newline in extension?! + | otherwise = length e <= 4 -- long enough for "jpeg" {- A key's checksum is checked during fsck. -} checkKeyChecksum :: SHASize -> Key -> FilePath -> Annex Bool @@ -137,7 +136,7 @@ checkKeyChecksum size key file = do let filesize = fromIntegral $ fileSize stat check <$> shaN size file filesize _ -> return True - where - check s - | s == dropExtensions (keyName key) = True - | otherwise = False + where + check s + | s == dropExtensions (keyName key) = True + | otherwise = False diff --git a/Backend/URL.hs b/Backend/URL.hs index cc9112a..81c287c 100644 --- a/Backend/URL.hs +++ b/Backend/URL.hs @@ -32,10 +32,10 @@ fromUrl url size = stubKey , keyBackendName = "URL" , keySize = size } - where - -- when it's not too long, use the url as the key name - -- 256 is the absolute filename max, but use a shorter - -- length because this is not the entire key filename. - key - | length url < 128 = url - | otherwise = take 128 url ++ "-" ++ md5s (Str url) + where + {- when it's not too long, use the url as the key name + - 256 is the absolute filename max, but use a shorter + - length because this is not the entire key filename. -} + key + | length url < 128 = url + | otherwise = take 128 url ++ "-" ++ md5s (Str url) diff --git a/Build/Configure.hs b/Build/Configure.hs index 7fb195a..d25445f 100644 --- a/Build/Configure.hs +++ b/Build/Configure.hs @@ -19,7 +19,7 @@ tests = , testCp "cp_a" "-a" , testCp "cp_p" "-p" , testCp "cp_reflink_auto" "--reflink=auto" - , TestCase "uuid generator" $ selectCmd "uuid" ["uuid -m", "uuid", "uuidgen"] "" + , TestCase "uuid generator" $ selectCmd "uuid" [("uuid -m", ""), ("uuid", ""), ("uuidgen", "")] , TestCase "xargs -0" $ requireCmd "xargs_0" "xargs -0 /dev/null" , TestCase "curl" $ testCmd "curl" "curl --version >/dev/null" @@ -28,20 +28,35 @@ tests = , TestCase "gpg" $ testCmd "gpg" "gpg --version >/dev/null" , TestCase "lsof" $ testCmd "lsof" "lsof -v >/dev/null 2>&1" , TestCase "ssh connection caching" getSshConnectionCaching - ] ++ shaTestCases [1, 256, 512, 224, 384] + ] ++ shaTestCases + [ (1, "da39a3ee5e6b4b0d3255bfef95601890afd80709") + , (256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") + , (512, "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e") + , (224, "d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f") + , (384, "38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b") + ] -shaTestCases :: [Int] -> [TestCase] +{- shaNsum are the program names used by coreutils. Some systems like OSX + - sometimes install these with 'g' prefixes. + - + - On some systems, shaN is used instead, but on other + - systems, it might be "hashalot", which does not produce + - usable checksums. Only accept programs that produce + - known-good hashes. -} +shaTestCases :: [(Int, String)] -> [TestCase] shaTestCases l = map make l - where - make n = TestCase key $ maybeSelectCmd key (shacmds n) " [x, osxpath x]) $ - map (\x -> "sha" ++ show n ++ x) ["", "sum"] - -- Max OSX puts GNU tools outside PATH, so look in - -- the location it uses, and remember where to run them - -- from. 
-		osxpath = "/opt/local/libexec/gnubin"
+  where
+	make (n, knowngood) = TestCase key $ maybeSelectCmd key $
+		zip (shacmds n) (repeat check)
+	  where
+		key = "sha" ++ show n
+		check = "</dev/null | grep -q " ++ knowngood
+	shacmds n = concatMap (\x -> [x, 'g':x, osxpath </> x]) $
+		map (\x -> "sha" ++ show n ++ x) ["sum", ""]
+	{- Mac OSX sometimes puts GNU tools outside PATH, so look in
+	 - the location it uses, and remember where to run them
+	 - from. -}
+	osxpath = "/opt/local/libexec/gnubin"
 
 tmpDir :: String
 tmpDir = "tmp"
@@ -51,9 +66,9 @@ testFile = tmpDir ++ "/testfile"
 
 testCp :: ConfigKey -> String -> TestCase
 testCp k option = TestCase cmd $ testCmd k cmdline
-	where
-		cmd = "cp " ++ option
-		cmdline = cmd ++ " " ++ testFile ++ " " ++ testFile ++ ".new"
+  where
+	cmd = "cp " ++ option
+	cmdline = cmd ++ " " ++ testFile ++ " " ++ testFile ++ ".new"
 
 {- Pulls package version out of the changelog. -}
 getVersion :: Test
@@ -66,8 +81,8 @@ getVersionString = do
 	changelog <- readFile "CHANGELOG"
 	let verline = head $ lines changelog
 	return $ middle (words verline !! 1)
-	where
-		middle = drop 1 . init
+  where
+	middle = drop 1 . init
 
 getGitVersion :: Test
 getGitVersion = do
@@ -88,14 +103,14 @@ cabalSetup = do
 		map (setfield "Version" version) $
 			lines cabal
 	renameFile tmpcabalfile cabalfile
-	where
-		cabalfile = "git-annex.cabal"
-		tmpcabalfile = cabalfile++".tmp"
-		setfield field value s
-			| fullfield `isPrefixOf` s = fullfield ++ value
-			| otherwise = s
-			where
-				fullfield = field ++ ": "
+  where
+	cabalfile = "git-annex.cabal"
+	tmpcabalfile = cabalfile++".tmp"
+	setfield field value s
+		| fullfield `isPrefixOf` s = fullfield ++ value
+		| otherwise = s
+	  where
+		fullfield = field ++ ": "
 
 setup :: IO ()
 setup = do
diff --git a/Build/InstallDesktopFile.hs b/Build/InstallDesktopFile.hs
index 1bcba70..6339791 100644
--- a/Build/InstallDesktopFile.hs
+++ b/Build/InstallDesktopFile.hs
@@ -46,11 +46,11 @@ autostart command = genDesktopEntry
 
 systemwideInstall :: IO Bool
 systemwideInstall = isroot <||> destdirset
-	where
-		isroot = do
-			uid <- fromIntegral <$> getRealUserID
-			return $ uid == (0 :: Int)
-		destdirset = isJust <$> catchMaybeIO (getEnv "DESTDIR")
+  where
+	isroot = do
+		uid <- fromIntegral <$> getRealUserID
+		return $ uid == (0 :: Int)
+	destdirset = isJust <$> catchMaybeIO (getEnv "DESTDIR")
 
 inDestDir :: FilePath -> IO FilePath
 inDestDir f = do
@@ -74,32 +74,6 @@ writeOSXDesktop command = do
 		, userAutoStart osxAutoStartLabel
 		)
 
-	{- Install the OSX app in non-self-contained mode.
-} - let appdir = "git-annex.app" - installOSXAppFile appdir "Contents/Info.plist" Nothing - installOSXAppFile appdir "Contents/Resources/git-annex.icns" Nothing - installOSXAppFile appdir "Contents/MacOS/git-annex-webapp" (Just webappscript) - where - webappscript = unlines - [ "#!/bin/sh" - , command ++ " webapp" - ] - -installOSXAppFile :: FilePath -> FilePath -> Maybe String -> IO () -installOSXAppFile appdir appfile mcontent = do - let src = "standalone" "osx" appdir appfile - home <- myHomeDir - dest <- ifM systemwideInstall - ( return $ "/Applications" appdir appfile - , return $ home "Desktop" appdir appfile - ) - createDirectoryIfMissing True (parentDir dest) - case mcontent of - Just content -> writeFile dest content - Nothing -> copyFile src dest - mode <- fileMode <$> getFileStatus src - setFileMode dest mode - install :: FilePath -> IO () install command = do #ifdef darwin_HOST_OS @@ -117,6 +91,6 @@ install command = do main :: IO () main = getArgs >>= go - where - go [] = error "specify git-annex command" - go (command:_) = install command + where + go [] = error "specify git-annex command" + go (command:_) = install command diff --git a/Build/TestConfig.hs b/Build/TestConfig.hs index 0cc2019..92f6f68 100644 --- a/Build/TestConfig.hs +++ b/Build/TestConfig.hs @@ -29,22 +29,22 @@ instance Show Config where [ key ++ " :: " ++ valuetype value , key ++ " = " ++ show value ] - where - valuetype (BoolConfig _) = "Bool" - valuetype (StringConfig _) = "String" - valuetype (MaybeStringConfig _) = "Maybe String" - valuetype (MaybeBoolConfig _) = "Maybe Bool" + where + valuetype (BoolConfig _) = "Bool" + valuetype (StringConfig _) = "String" + valuetype (MaybeStringConfig _) = "Maybe String" + valuetype (MaybeBoolConfig _) = "Maybe Bool" writeSysConfig :: [Config] -> IO () writeSysConfig config = writeFile "Build/SysConfig.hs" body - where - body = unlines $ header ++ map show config ++ footer - header = [ - "{- Automatically generated. -}" - , "module Build.SysConfig where" - , "" - ] - footer = [] + where + body = unlines $ header ++ map show config ++ footer + header = [ + "{- Automatically generated. -}" + , "module Build.SysConfig where" + , "" + ] + footer = [] runTests :: [TestCase] -> IO [Config] runTests [] = return [] @@ -60,12 +60,12 @@ requireCmd :: ConfigKey -> String -> Test requireCmd k cmdline = do ret <- testCmd k cmdline handle ret - where - handle r@(Config _ (BoolConfig True)) = return r - handle r = do - testEnd r - error $ "** the " ++ c ++ " command is required" - c = head $ words cmdline + where + handle r@(Config _ (BoolConfig True)) = return r + handle r = do + testEnd r + error $ "** the " ++ c ++ " command is required" + c = head $ words cmdline {- Checks if a command is available by running a command line. -} testCmd :: ConfigKey -> String -> Test @@ -75,7 +75,7 @@ testCmd k cmdline = do {- Ensures that one of a set of commands is available by running each in - turn. The Config is set to the first one found. -} -selectCmd :: ConfigKey -> [String] -> String -> Test +selectCmd :: ConfigKey -> [(String, String)] -> Test selectCmd k = searchCmd (return . Config k . StringConfig) (\cmds -> do @@ -83,20 +83,20 @@ selectCmd k = searchCmd error $ "* need one of these commands, but none are available: " ++ show cmds ) -maybeSelectCmd :: ConfigKey -> [String] -> String -> Test +maybeSelectCmd :: ConfigKey -> [(String, String)] -> Test maybeSelectCmd k = searchCmd (return . Config k . MaybeStringConfig . 
Just)
+	(\_ -> return $ Config k $ MaybeStringConfig Nothing)
+
-searchCmd :: (String -> Test) -> ([String] -> Test) -> [String] -> String -> Test
-searchCmd success failure cmds param = search cmds
-	where
-		search [] = failure cmds
-		search (c:cs) = do
-			ret <- system $ quiet c ++ " " ++ param
-			if ret == ExitSuccess
-				then success c
-				else search cs
+searchCmd :: (String -> Test) -> ([String] -> Test) -> [(String, String)] -> Test
+searchCmd success failure cmdsparams = search cmdsparams
+  where
+	search [] = failure $ fst $ unzip cmdsparams
+	search ((c, params):cs) = do
+		ret <- system $ quiet $ c ++ " " ++ params
+		if ret == ExitSuccess
+			then success c
+			else search cs
 
 quiet :: String -> String
 quiet s = s ++ " >/dev/null 2>&1"
diff --git a/CHANGELOG b/CHANGELOG
index 0e01ba3..c05d529 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,54 @@
+git-annex (3.20121112) unstable; urgency=low
+
+  * assistant: Can use XMPP to notify other nodes about pushes made to other
+    repositories, as well as pushing to them directly over XMPP.
+  * webapp: Added an XMPP configuration interface.
+  * webapp: Supports pairing over XMPP, with both friends, and other repos
+    using the same account.
+  * assistant: Drops non-preferred content when possible.
+  * assistant: Notices, and applies config changes as they are made to
+    the git-annex branch, including config changes pushed in from remotes.
+  * git-annex-shell: GIT_ANNEX_SHELL_DIRECTORY can be set to limit it
+    to operating on a specified directory.
+  * webapp: When setting up authorized_keys, use GIT_ANNEX_SHELL_DIRECTORY.
+  * Preferred content path matching bugfix.
+  * Preferred content expressions cannot use "in=".
+  * Preferred content expressions can use "present".
+  * Fix handling of GIT_DIR when it refers to a git submodule.
+  * Depend on and use the Haskell SafeSemaphore library, which provides
+    exception-safe versions of SampleVar and QSemN.
+    Thanks, Ben Gamari for an excellent patch set.
+  * file:/// URLs can now be used with the web special remote.
+  * webapp: Allow dashes in ssh key comments when pairing.
+  * uninit: Check and abort if there are symlinks to annexed content that
+    are not checked into git.
+  * webapp: Switched to using the same multicast IP address that avahi uses.
+  * bup: Don't pass - to bup-split to make it read stdin; bup 0.25
+    does not accept that.
+  * bugfix: Don't fail transferring content from read-only repos.
+    Closes: #691341
+  * configure: Check that checksum programs produce correct checksums.
+  * Re-enable dbus, using a new version of the library that fixes the memory
+    leak.
+  * NetWatcher: When dbus connection is lost, try to reconnect.
+  * Use USER and HOME environment variables when set, and only fall back to
+    getpwent, which doesn't work with LDAP or NIS.
+  * rsync special remote: Include annex-rsync-options when running rsync
+    to test a key's presence.
+  * The standalone tarball's runshell now takes care of installing a
+    ~/.ssh/git-annex-shell wrapper the first time it's run.
+  * webapp: Make an initial, empty commit so there is a master branch.
+  * assistant: Fix syncing local drives.
+  * webapp: Fix creation of rsync.net repositories.
+  * webapp: Fix renaming of special remotes.
+  * webapp: Generate better git remote names.
+  * webapp: Ensure that rsync special remotes are enabled using the same
+    name they were originally created using.
+  * Bugfix: Fix hang in webapp when setting up a ssh remote with an absolute
+    path.
+ + -- Joey Hess Mon, 12 Nov 2012 10:39:47 -0400 + git-annex (3.20121017) unstable; urgency=low * Fix zombie cleanup reversion introduced in 3.20121009. diff --git a/CmdLine.hs b/CmdLine.hs index 331c363..0b15521 100644 --- a/CmdLine.hs +++ b/CmdLine.hs @@ -44,13 +44,13 @@ dispatch fuzzyok allargs allcmds commonoptions fields header getgitrepo = do sequence_ flags prepCommand cmd params tryRun state' cmd $ [startup] ++ actions ++ [shutdown $ cmdnocommit cmd] - where - err msg = msg ++ "\n\n" ++ usage header allcmds commonoptions - cmd = Prelude.head cmds - (fuzzy, cmds, name, args) = findCmd fuzzyok allargs allcmds err - (flags, params) = getOptCmd args cmd commonoptions err - checkfuzzy = when fuzzy $ - inRepo $ Git.AutoCorrect.prepare name cmdname cmds + where + err msg = msg ++ "\n\n" ++ usage header allcmds commonoptions + cmd = Prelude.head cmds + (fuzzy, cmds, name, args) = findCmd fuzzyok allargs allcmds err + (flags, params) = getOptCmd args cmd commonoptions err + checkfuzzy = when fuzzy $ + inRepo $ Git.AutoCorrect.prepare name cmdname cmds {- Parses command line params far enough to find the Command to run, and - returns the remaining params. @@ -61,25 +61,25 @@ findCmd fuzzyok argv cmds err | not (null exactcmds) = (False, exactcmds, fromJust name, args) | fuzzyok && not (null inexactcmds) = (True, inexactcmds, fromJust name, args) | otherwise = error $ err $ "unknown command " ++ fromJust name - where - (name, args) = findname argv [] - findname [] c = (Nothing, reverse c) - findname (a:as) c - | "-" `isPrefixOf` a = findname as (a:c) - | otherwise = (Just a, reverse c ++ as) - exactcmds = filter (\c -> name == Just (cmdname c)) cmds - inexactcmds = case name of - Nothing -> [] - Just n -> Git.AutoCorrect.fuzzymatches n cmdname cmds + where + (name, args) = findname argv [] + findname [] c = (Nothing, reverse c) + findname (a:as) c + | "-" `isPrefixOf` a = findname as (a:c) + | otherwise = (Just a, reverse c ++ as) + exactcmds = filter (\c -> name == Just (cmdname c)) cmds + inexactcmds = case name of + Nothing -> [] + Just n -> Git.AutoCorrect.fuzzymatches n cmdname cmds {- Parses command line options, and returns actions to run to configure flags - and the remaining parameters for the command. -} getOptCmd :: Params -> Command -> [Option] -> (String -> String) -> (Flags, Params) getOptCmd argv cmd commonoptions err = check $ getOpt Permute (commonoptions ++ cmdoptions cmd) argv - where - check (flags, rest, []) = (flags, rest) - check (_, _, errs) = error $ err $ concat errs + where + check (flags, rest, []) = (flags, rest) + check (_, _, errs) = error $ err $ concat errs {- Runs a list of Annex actions. Catches IO errors and continues - (but explicitly thrown errors terminate the whole command). @@ -93,18 +93,18 @@ tryRun' errnum _ cmd [] tryRun' errnum state cmd (a:as) = do r <- run handle $! r - where - run = tryIO $ Annex.run state $ do - Annex.Queue.flushWhenFull - a - handle (Left err) = showerr err >> cont False state - handle (Right (success, state')) = cont success state' - cont success s = do - let errnum' = if success then errnum else errnum + 1 - (tryRun' $! errnum') s cmd as - showerr err = Annex.eval state $ do - showErr err - showEndFail + where + run = tryIO $ Annex.run state $ do + Annex.Queue.flushWhenFull + a + handle (Left err) = showerr err >> cont False state + handle (Right (success, state')) = cont success state' + cont success s = do + let errnum' = if success then errnum else errnum + 1 + (tryRun' $! 
errnum') s cmd as + showerr err = Annex.eval state $ do + showErr err + showEndFail {- Actions to perform each time ran. -} startup :: Annex Bool @@ -117,5 +117,6 @@ shutdown :: Bool -> Annex Bool shutdown nocommit = do saveState nocommit sequence_ =<< M.elems <$> Annex.getState Annex.cleanup + liftIO reapZombies -- zombies from long-running git processes sshCleanup -- ssh connection caching return True diff --git a/Command.hs b/Command.hs index 145ad50..bac2666 100644 --- a/Command.hs +++ b/Command.hs @@ -39,7 +39,6 @@ import Usage as ReExported import Logs.Trust import Config import Annex.CheckAttr -import qualified Git.Command {- Generates a normal command -} command :: String -> String -> [CommandSeek] -> String -> Command @@ -81,17 +80,14 @@ prepCommand Command { cmdseek = seek, cmdcheck = c } params = do {- Runs a command through the start, perform and cleanup stages -} doCommand :: CommandStart -> CommandCleanup doCommand = start - where - start = stage $ maybe skip perform - perform = stage $ maybe failure cleanup - cleanup = stage $ end - stage = (=<<) - skip = return True - failure = showEndFail >> return False - end r = do - -- zombies from long-running git processes - liftIO Git.Command.reap - showEndResult r >> return r + where + start = stage $ maybe skip perform + perform = stage $ maybe failure cleanup + cleanup = stage $ status + stage = (=<<) + skip = return True + failure = showEndFail >> return False + status r = showEndResult r >> return r {- Modifies an action to only act on files that are already annexed, - and passes the key and backend on to it. -} @@ -122,26 +118,26 @@ numCopies file = readish <$> checkAttr "annex.numcopies" file -} autoCopies :: FilePath -> Key -> (Int -> Int -> Bool) -> CommandStart -> CommandStart autoCopies file key vs a = Annex.getState Annex.auto >>= go - where - go False = a - go True = do - numcopiesattr <- numCopies file - needed <- getNumCopies numcopiesattr - (_, have) <- trustPartition UnTrusted =<< Remote.keyLocations key - if length have `vs` needed then a else stop + where + go False = a + go True = do + numcopiesattr <- numCopies file + needed <- getNumCopies numcopiesattr + have <- trustExclude UnTrusted =<< Remote.keyLocations key + if length have `vs` needed then a else stop autoCopiesWith :: FilePath -> Key -> (Int -> Int -> Bool) -> (Maybe Int -> CommandStart) -> CommandStart autoCopiesWith file key vs a = do numcopiesattr <- numCopies file Annex.getState Annex.auto >>= auto numcopiesattr - where - auto numcopiesattr False = a numcopiesattr - auto numcopiesattr True = do - needed <- getNumCopies numcopiesattr - (_, have) <- trustPartition UnTrusted =<< Remote.keyLocations key - if length have `vs` needed - then a numcopiesattr - else stop + where + auto numcopiesattr False = a numcopiesattr + auto numcopiesattr True = do + needed <- getNumCopies numcopiesattr + have <- trustExclude UnTrusted =<< Remote.keyLocations key + if length have `vs` needed + then a numcopiesattr + else stop checkAuto :: Annex Bool -> Annex Bool checkAuto checker = ifM (Annex.getState Annex.auto) diff --git a/Command/Add.hs b/Command/Add.hs index 73edb5e..7fa7cb3 100644 --- a/Command/Add.hs +++ b/Command/Add.hs @@ -32,20 +32,20 @@ seek = [withFilesNotInGit start, withFilesUnlocked start] - to its content. 
-} start :: FilePath -> CommandStart start file = notBareRepo $ ifAnnexed file fixup add - where - add = do - s <- liftIO $ getSymbolicLinkStatus file - if isSymbolicLink s || not (isRegularFile s) - then stop - else do - showStart "add" file - next $ perform file - fixup (key, _) = do - -- fixup from an interrupted add; the symlink - -- is present but not yet added to git - showStart "add" file - liftIO $ removeFile file - next $ next $ cleanup file key =<< inAnnex key + where + add = do + s <- liftIO $ getSymbolicLinkStatus file + if isSymbolicLink s || not (isRegularFile s) + then stop + else do + showStart "add" file + next $ perform file + fixup (key, _) = do + -- fixup from an interrupted add; the symlink + -- is present but not yet added to git + showStart "add" file + liftIO $ removeFile file + next $ next $ cleanup file key =<< inAnnex key {- The file that's being added is locked down before a key is generated, - to prevent it from being modified in between. It's hard linked into a @@ -67,15 +67,15 @@ ingest :: KeySource -> Annex (Maybe Key) ingest source = do backend <- chooseBackend $ keyFilename source genKey source backend >>= go - where - go Nothing = do - liftIO $ nukeFile $ contentLocation source - return Nothing - go (Just (key, _)) = do - handle (undo (keyFilename source) key) $ - moveAnnex key $ contentLocation source - liftIO $ nukeFile $ keyFilename source - return $ Just key + where + go Nothing = do + liftIO $ nukeFile $ contentLocation source + return Nothing + go (Just (key, _)) = do + handle (undo (keyFilename source) key) $ + moveAnnex key $ contentLocation source + liftIO $ nukeFile $ keyFilename source + return $ Just key perform :: FilePath -> CommandPerform perform file = @@ -91,12 +91,12 @@ undo file key e = do handle tryharder $ fromAnnex key file logStatus key InfoMissing throw e - where - -- fromAnnex could fail if the file ownership is weird - tryharder :: IOException -> Annex () - tryharder _ = do - src <- inRepo $ gitAnnexLocation key - liftIO $ moveFile src file + where + -- fromAnnex could fail if the file ownership is weird + tryharder :: IOException -> Annex () + tryharder _ = do + src <- inRepo $ gitAnnexLocation key + liftIO $ moveFile src file {- Creates the symlink to the annexed content, returns the link target. -} link :: FilePath -> Key -> Bool -> Annex String diff --git a/Command/AddUnused.hs b/Command/AddUnused.hs index f705003..519c67e 100644 --- a/Command/AddUnused.hs +++ b/Command/AddUnused.hs @@ -25,8 +25,8 @@ start = startUnused "addunused" perform (performOther "bad") (performOther "tmp" perform :: Key -> CommandPerform perform key = next $ Command.Add.cleanup file key True - where - file = "unused." ++ key2file key + where + file = "unused." 
++ key2file key {- The content is not in the annex, but in another directory, and - it seems better to error out, rather than moving bad/tmp content into diff --git a/Command/AddUrl.hs b/Command/AddUrl.hs index bef1d68..0003237 100644 --- a/Command/AddUrl.hs +++ b/Command/AddUrl.hs @@ -40,31 +40,31 @@ seek = [withField fileOption return $ \f -> start :: Maybe FilePath -> Maybe Int -> String -> CommandStart start optfile pathdepth s = notBareRepo $ go $ fromMaybe bad $ parseURI s - where - bad = fromMaybe (error $ "bad url " ++ s) $ - parseURI $ escapeURIString isUnescapedInURI s - go url = do - let file = fromMaybe (url2file url pathdepth) optfile - showStart "addurl" file - next $ perform s file + where + bad = fromMaybe (error $ "bad url " ++ s) $ + parseURI $ escapeURIString isUnescapedInURI s + go url = do + let file = fromMaybe (url2file url pathdepth) optfile + showStart "addurl" file + next $ perform s file perform :: String -> FilePath -> CommandPerform perform url file = ifAnnexed file addurl geturl - where - geturl = do - liftIO $ createDirectoryIfMissing True (parentDir file) - ifM (Annex.getState Annex.fast) - ( nodownload url file , download url file ) - addurl (key, _backend) = do - headers <- getHttpHeaders - ifM (liftIO $ Url.check url headers $ keySize key) - ( do - setUrlPresent key url - next $ return True - , do - warning $ "failed to verify url: " ++ url - stop - ) + where + geturl = do + liftIO $ createDirectoryIfMissing True (parentDir file) + ifM (Annex.getState Annex.fast) + ( nodownload url file , download url file ) + addurl (key, _backend) = do + headers <- getHttpHeaders + ifM (liftIO $ Url.check url headers $ keySize key) + ( do + setUrlPresent key url + next $ return True + , do + warning $ "failed to verify url: " ++ url + stop + ) download :: String -> FilePath -> CommandPerform download url file = do @@ -103,10 +103,10 @@ url2file url pathdepth = case pathdepth of | depth > 0 -> frombits $ drop depth | depth < 0 -> frombits $ reverse . take (negate depth) . reverse | otherwise -> error "bad --pathdepth" - where - fullurl = uriRegName auth ++ uriPath url ++ uriQuery url - frombits a = join "/" $ a urlbits - urlbits = map (filesize . escape) $ filter (not . null) $ split "/" fullurl - auth = fromMaybe (error $ "bad url " ++ show url) $ uriAuthority url - filesize = take 255 - escape = replace "/" "_" . replace "?" "_" + where + fullurl = uriRegName auth ++ uriPath url ++ uriQuery url + frombits a = join "/" $ a urlbits + urlbits = map (filesize . escape) $ filter (not . null) $ split "/" fullurl + auth = fromMaybe (error $ "bad url " ++ show url) $ uriAuthority url + filesize = take 255 + escape = replace "/" "_" . replace "?" 
"_" diff --git a/Command/Assistant.hs b/Command/Assistant.hs index b039e27..ea8a87a 100644 --- a/Command/Assistant.hs +++ b/Command/Assistant.hs @@ -65,7 +65,7 @@ autoStart = do ) , nothing ) - where - go program dir = do - changeWorkingDirectory dir - boolSystem program [Param "assistant"] + where + go program dir = do + changeWorkingDirectory dir + boolSystem program [Param "assistant"] diff --git a/Command/Commit.hs b/Command/Commit.hs index d3ce3d7..1659061 100644 --- a/Command/Commit.hs +++ b/Command/Commit.hs @@ -24,6 +24,6 @@ start = next $ next $ do Annex.Branch.commit "update" _ <- runhook <=< inRepo $ Git.hookPath "annex-content" return True - where - runhook (Just hook) = liftIO $ boolSystem hook [] - runhook Nothing = return True + where + runhook (Just hook) = liftIO $ boolSystem hook [] + runhook Nothing = return True diff --git a/Command/Copy.hs b/Command/Copy.hs index 4352aaa..dd55992 100644 --- a/Command/Copy.hs +++ b/Command/Copy.hs @@ -29,7 +29,7 @@ start :: Maybe Remote -> Maybe Remote -> FilePath -> (Key, Backend) -> CommandSt start to from file (key, backend) = autoCopies file key (<) $ stopUnless shouldCopy $ Command.Move.start to from False file (key, backend) - where - shouldCopy = case to of - Nothing -> checkAuto $ wantGet (Just file) - Just r -> checkAuto $ wantSend (Just file) (Remote.uuid r) + where + shouldCopy = case to of + Nothing -> checkAuto $ wantGet (Just file) + Just r -> checkAuto $ wantSend (Just file) (Remote.uuid r) diff --git a/Command/Drop.hs b/Command/Drop.hs index 26e80f8..6c210b1 100644 --- a/Command/Drop.hs +++ b/Command/Drop.hs @@ -48,7 +48,7 @@ startLocal file numcopies key = stopUnless (inAnnex key) $ do startRemote :: FilePath -> Maybe Int -> Key -> Remote -> CommandStart startRemote file numcopies key remote = do - showStart "drop" file + showStart ("drop " ++ Remote.name remote) file next $ performRemote key numcopies remote performLocal :: Key -> Maybe Int -> CommandPerform @@ -76,8 +76,8 @@ performRemote key numcopies remote = lockContent key $ do stopUnless (canDropKey key numcopies have tocheck [uuid]) $ do ok <- Remote.removeKey remote key next $ cleanupRemote key remote ok - where - uuid = Remote.uuid remote + where + uuid = Remote.uuid remote cleanupLocal :: Key -> CommandCleanup cleanupLocal key = do @@ -106,20 +106,20 @@ canDropKey key numcopiesM have check skip = do findCopies :: Key -> Int -> [UUID] -> [UUID] -> [Remote] -> Annex Bool findCopies key need skip = helper [] - where - helper bad have [] - | length have >= need = return True - | otherwise = notEnoughCopies key need have skip bad - helper bad have (r:rs) - | length have >= need = return True - | otherwise = do - let u = Remote.uuid r - let duplicate = u `elem` have - haskey <- Remote.hasKey r key - case (duplicate, haskey) of - (False, Right True) -> helper bad (u:have) rs - (False, Left _) -> helper (r:bad) have rs - _ -> helper bad have rs + where + helper bad have [] + | length have >= need = return True + | otherwise = notEnoughCopies key need have skip bad + helper bad have (r:rs) + | length have >= need = return True + | otherwise = do + let u = Remote.uuid r + let duplicate = u `elem` have + haskey <- Remote.hasKey r key + case (duplicate, haskey) of + (False, Right True) -> helper bad (u:have) rs + (False, Left _) -> helper (r:bad) have rs + _ -> helper bad have rs notEnoughCopies :: Key -> Int -> [UUID] -> [UUID] -> [Remote] -> Annex Bool notEnoughCopies key need have skip bad = do @@ -132,6 +132,6 @@ notEnoughCopies key need have skip bad = do 
Remote.showLocations key (have++skip) hint return False - where - unsafe = showNote "unsafe" - hint = showLongNote "(Use --force to override this check, or adjust annex.numcopies.)" + where + unsafe = showNote "unsafe" + hint = showLongNote "(Use --force to override this check, or adjust annex.numcopies.)" diff --git a/Command/DropUnused.hs b/Command/DropUnused.hs index 597a4ee..00c0eec 100644 --- a/Command/DropUnused.hs +++ b/Command/DropUnused.hs @@ -29,13 +29,13 @@ start = startUnused "dropunused" perform (performOther gitAnnexBadLocation) (per perform :: Key -> CommandPerform perform key = maybe droplocal dropremote =<< Remote.byName =<< from - where - dropremote r = do - showAction $ "from " ++ Remote.name r - ok <- Remote.removeKey r key - next $ Command.Drop.cleanupRemote key r ok - droplocal = Command.Drop.performLocal key (Just 0) -- force drop - from = Annex.getField $ Option.name Command.Drop.fromOption + where + dropremote r = do + showAction $ "from " ++ Remote.name r + ok <- Remote.removeKey r key + next $ Command.Drop.cleanupRemote key r ok + droplocal = Command.Drop.performLocal key (Just 0) -- force drop + from = Annex.getField $ Option.name Command.Drop.fromOption performOther :: (Key -> Git.Repo -> FilePath) -> Key -> CommandPerform performOther filespec key = do diff --git a/Command/Find.hs b/Command/Find.hs index 177b794..1e509d1 100644 --- a/Command/Find.hs +++ b/Command/Find.hs @@ -29,14 +29,14 @@ formatOption = Option.field [] "format" paramFormat "control format of output" print0Option :: Option print0Option = Option.Option [] ["print0"] (Option.NoArg set) "terminate output with null" - where - set = Annex.setField (Option.name formatOption) "${file}\0" + where + set = Annex.setField (Option.name formatOption) "${file}\0" seek :: [CommandSeek] seek = [withField formatOption formatconverter $ \f -> withFilesInGit $ whenAnnexed $ start f] - where - formatconverter = return . fmap Utility.Format.gen + where + formatconverter = return . fmap Utility.Format.gen start :: Maybe Utility.Format.Format -> FilePath -> (Key, Backend) -> CommandStart start format file (key, _) = do @@ -50,12 +50,12 @@ start format file (key, _) = do Utility.Format.format formatter $ M.fromList vars stop - where - vars = - [ ("file", file) - , ("key", key2file key) - , ("backend", keyBackendName key) - , ("bytesize", size show) - , ("humansize", size $ roughSize storageUnits True) - ] - size c = maybe "unknown" c $ keySize key + where + vars = + [ ("file", file) + , ("key", key2file key) + , ("backend", keyBackendName key) + , ("bytesize", size show) + , ("humansize", size $ roughSize storageUnits True) + ] + size c = maybe "unknown" c $ keySize key diff --git a/Command/Fsck.hs b/Command/Fsck.hs index 5e130c9..deb3a5c 100644 --- a/Command/Fsck.hs +++ b/Command/Fsck.hs @@ -78,22 +78,22 @@ withIncremental = withValue $ do (True, _, _) -> maybe startIncremental (return . ContIncremental . 
Just) =<< getStartTime - where - startIncremental = do - recordStartTime - return StartIncremental - - checkschedule Nothing = error "bad --incremental-schedule value" - checkschedule (Just delta) = do - Annex.addCleanup "" $ do - v <- getStartTime - case v of - Nothing -> noop - Just started -> do - now <- liftIO getPOSIXTime - when (now - realToFrac started >= delta) $ - resetStartTime - return True + where + startIncremental = do + recordStartTime + return StartIncremental + + checkschedule Nothing = error "bad --incremental-schedule value" + checkschedule (Just delta) = do + Annex.addCleanup "" $ do + v <- getStartTime + case v of + Nothing -> noop + Just started -> do + now <- liftIO getPOSIXTime + when (now - realToFrac started >= delta) $ + resetStartTime + return True start :: Maybe Remote -> Incremental -> FilePath -> (Key, Backend) -> CommandStart start from inc file (key, backend) = do @@ -101,8 +101,8 @@ start from inc file (key, backend) = do case from of Nothing -> go $ perform key file backend numcopies Just r -> go $ performRemote key file backend numcopies r - where - go = runFsck inc file key + where + go = runFsck inc file key perform :: Key -> FilePath -> Backend -> Maybe Int -> Annex Bool perform key file backend numcopies = check @@ -119,48 +119,48 @@ perform key file backend numcopies = check performRemote :: Key -> FilePath -> Backend -> Maybe Int -> Remote -> Annex Bool performRemote key file backend numcopies remote = dispatch =<< Remote.hasKey remote key - where - dispatch (Left err) = do - showNote err - return False - dispatch (Right True) = withtmp $ \tmpfile -> - ifM (getfile tmpfile) - ( go True (Just tmpfile) - , go True Nothing - ) - dispatch (Right False) = go False Nothing - go present localcopy = check - [ verifyLocationLogRemote key file remote present - , checkKeySizeRemote key remote localcopy - , checkBackendRemote backend key remote localcopy - , checkKeyNumCopies key file numcopies - ] - withtmp a = do - pid <- liftIO getProcessID - t <- fromRepo gitAnnexTmpDir - createAnnexDirectory t - let tmp = t "fsck" ++ show pid ++ "." ++ keyFile key - let cleanup = liftIO $ catchIO (removeFile tmp) (const noop) - cleanup - cleanup `after` a tmp - getfile tmp = - ifM (Remote.retrieveKeyFileCheap remote key tmp) - ( return True - , ifM (Annex.getState Annex.fast) - ( return False - , Remote.retrieveKeyFile remote key Nothing tmp - ) + where + dispatch (Left err) = do + showNote err + return False + dispatch (Right True) = withtmp $ \tmpfile -> + ifM (getfile tmpfile) + ( go True (Just tmpfile) + , go True Nothing + ) + dispatch (Right False) = go False Nothing + go present localcopy = check + [ verifyLocationLogRemote key file remote present + , checkKeySizeRemote key remote localcopy + , checkBackendRemote backend key remote localcopy + , checkKeyNumCopies key file numcopies + ] + withtmp a = do + pid <- liftIO getProcessID + t <- fromRepo gitAnnexTmpDir + createAnnexDirectory t + let tmp = t "fsck" ++ show pid ++ "." ++ keyFile key + let cleanup = liftIO $ catchIO (removeFile tmp) (const noop) + cleanup + cleanup `after` a tmp + getfile tmp = + ifM (Remote.retrieveKeyFileCheap remote key tmp) + ( return True + , ifM (Annex.getState Annex.fast) + ( return False + , Remote.retrieveKeyFile remote key Nothing tmp ) + ) {- To fsck a bare repository, fsck each key in the location log. 
-} withBarePresentKeys :: (Key -> CommandStart) -> CommandSeek withBarePresentKeys a params = isBareRepo >>= go - where - go False = return [] - go True = do - unless (null params) $ - error "fsck should be run without parameters in a bare repository" - map a <$> loggedKeys + where + go False = return [] + go True = do + unless (null params) $ + error "fsck should be run without parameters in a bare repository" + map a <$> loggedKeys startBare :: Incremental -> Key -> CommandStart startBare inc key = case Backend.maybeLookupBackendName (Types.Key.keyBackendName key) of @@ -242,10 +242,10 @@ verifyLocationLog' key desc present u bad = do "but its content is missing." return False _ -> return True - where - fix s = do - showNote "fixing location log" - bad s + where + fix s = do + showNote "fixing location log" + bad s {- The size of the data for a key is checked against the size encoded in - the key's metadata, if available. -} @@ -269,19 +269,19 @@ checkKeySizeOr bad key file = case Types.Key.keySize key of size' <- fromIntegral . fileSize <$> liftIO (getFileStatus file) comparesizes size size' - where - comparesizes a b = do - let same = a == b - unless same $ badsize a b - return same - badsize a b = do - msg <- bad key - warning $ concat - [ "Bad file size (" - , compareSizes storageUnits True a b - , "); " - , msg - ] + where + comparesizes a b = do + let same = a == b + unless same $ badsize a b + return same + badsize a b = do + msg <- bad key + warning $ concat + [ "Bad file size (" + , compareSizes storageUnits True a b + , "); " + , msg + ] checkBackend :: Backend -> Key -> Annex Bool checkBackend backend key = do @@ -290,8 +290,8 @@ checkBackend backend key = do checkBackendRemote :: Backend -> Key -> Remote -> Maybe FilePath -> Annex Bool checkBackendRemote backend key remote = maybe (return True) go - where - go = checkBackendOr (badContentRemote remote) backend key + where + go = checkBackendOr (badContentRemote remote) backend key checkBackendOr :: (Key -> Annex String) -> Backend -> Key -> FilePath -> Annex Bool checkBackendOr bad backend key file = @@ -414,9 +414,9 @@ recordStartTime = do t <- modificationTime <$> getFileStatus f hPutStr h $ showTime $ realToFrac t hClose h - where - showTime :: POSIXTime -> String - showTime = show + where + showTime :: POSIXTime -> String + showTime = show resetStartTime :: Annex () resetStartTime = liftIO . nukeFile =<< fromRepo gitAnnexFsckState @@ -431,7 +431,7 @@ getStartTime = do return $ if Just (realToFrac timestamp) == t then Just timestamp else Nothing - where - readishTime :: String -> Maybe POSIXTime - readishTime s = utcTimeToPOSIXSeconds <$> - parseTime defaultTimeLocale "%s%Qs" s + where + readishTime :: String -> Maybe POSIXTime + readishTime s = utcTimeToPOSIXSeconds <$> + parseTime defaultTimeLocale "%s%Qs" s diff --git a/Command/Get.hs b/Command/Get.hs index c95e4eb..7f02e79 100644 --- a/Command/Get.hs +++ b/Command/Get.hs @@ -32,10 +32,10 @@ start from file (key, _) = stopUnless ((not <$> inAnnex key) <&&> checkAuto (wan -- get --from = copy --from stopUnless (Command.Move.fromOk src key) $ go $ Command.Move.fromPerform src False key file - where - go a = do - showStart "get" file - next a + where + go a = do + showStart "get" file + next a perform :: Key -> FilePath -> CommandPerform perform key file = stopUnless (getViaTmp key $ getKeyFile key file) $ @@ -45,29 +45,29 @@ perform key file = stopUnless (getViaTmp key $ getKeyFile key file) $ - and copy it to here. 
-} getKeyFile :: Key -> FilePath -> FilePath -> Annex Bool getKeyFile key file dest = dispatch =<< Remote.keyPossibilities key - where - dispatch [] = do - showNote "not available" - Remote.showLocations key [] - return False - dispatch remotes = trycopy remotes remotes - trycopy full [] = do - Remote.showTriedRemotes full - Remote.showLocations key [] - return False - trycopy full (r:rs) = - ifM (probablyPresent r) - ( docopy r (trycopy full rs) - , trycopy full rs - ) - -- This check is to avoid an ugly message if a remote is a - -- drive that is not mounted. - probablyPresent r - | Remote.hasKeyCheap r = - either (const False) id <$> Remote.hasKey r key - | otherwise = return True - docopy r continue = do - ok <- download (Remote.uuid r) key (Just file) noRetry $ do - showAction $ "from " ++ Remote.name r - Remote.retrieveKeyFile r key (Just file) dest - if ok then return ok else continue + where + dispatch [] = do + showNote "not available" + Remote.showLocations key [] + return False + dispatch remotes = trycopy remotes remotes + trycopy full [] = do + Remote.showTriedRemotes full + Remote.showLocations key [] + return False + trycopy full (r:rs) = + ifM (probablyPresent r) + ( docopy r (trycopy full rs) + , trycopy full rs + ) + -- This check is to avoid an ugly message if a remote is a + -- drive that is not mounted. + probablyPresent r + | Remote.hasKeyCheap r = + either (const False) id <$> Remote.hasKey r key + | otherwise = return True + docopy r continue = do + ok <- download (Remote.uuid r) key (Just file) noRetry $ do + showAction $ "from " ++ Remote.name r + Remote.retrieveKeyFile r key (Just file) dest + if ok then return ok else continue diff --git a/Command/Help.hs b/Command/Help.hs index 80a7b95..95033eb 100644 --- a/Command/Help.hs +++ b/Command/Help.hs @@ -47,5 +47,5 @@ showHelp = liftIO $ putStrLn $ unlines ] , "Run git-annex without any options for a complete command and option list." 
] - where - cmdline c = "\t" ++ cmdname c ++ "\t" ++ cmddesc c + where + cmdline c = "\t" ++ cmdname c ++ "\t" ++ cmddesc c diff --git a/Command/InAnnex.hs b/Command/InAnnex.hs index ac4af8d..cd4bff2 100644 --- a/Command/InAnnex.hs +++ b/Command/InAnnex.hs @@ -20,8 +20,8 @@ seek = [withKeys start] start :: Key -> CommandStart start key = inAnnexSafe key >>= dispatch - where - dispatch (Just True) = stop - dispatch (Just False) = exit 1 - dispatch Nothing = exit 100 - exit n = liftIO $ exitWith $ ExitFailure n + where + dispatch (Just True) = stop + dispatch (Just False) = exit 1 + dispatch Nothing = exit 100 + exit n = liftIO $ exitWith $ ExitFailure n diff --git a/Command/Init.hs b/Command/Init.hs index bbabdc4..342ef84 100644 --- a/Command/Init.hs +++ b/Command/Init.hs @@ -22,8 +22,8 @@ start :: [String] -> CommandStart start ws = do showStart "init" description next $ perform description - where - description = unwords ws + where + description = unwords ws perform :: String -> CommandPerform perform description = do diff --git a/Command/InitRemote.hs b/Command/InitRemote.hs index ad93529..720fddd 100644 --- a/Command/InitRemote.hs +++ b/Command/InitRemote.hs @@ -40,8 +40,8 @@ start (name:ws) = do showStart "initremote" name next $ perform t u name $ M.union config c - where - config = Logs.Remote.keyValToConfig ws + where + config = Logs.Remote.keyValToConfig ws perform :: RemoteType -> UUID -> String -> R.RemoteConfig -> CommandPerform perform t u name c = do @@ -59,19 +59,19 @@ findByName :: String -> Annex (UUID, R.RemoteConfig) findByName name = do m <- Logs.Remote.readRemoteLog maybe generate return $ findByName' name m - where - generate = do - uuid <- liftIO genUUID - return (uuid, M.insert nameKey name M.empty) + where + generate = do + uuid <- liftIO genUUID + return (uuid, M.insert nameKey name M.empty) findByName' :: String -> M.Map UUID R.RemoteConfig -> Maybe (UUID, R.RemoteConfig) findByName' n = headMaybe . filter (matching . snd) . M.toList - where - matching c = case M.lookup nameKey c of - Nothing -> False - Just n' - | n' == n -> True - | otherwise -> False + where + matching c = case M.lookup nameKey c of + Nothing -> False + Just n' + | n' == n -> True + | otherwise -> False remoteNames :: Annex [String] remoteNames = do @@ -81,12 +81,12 @@ remoteNames = do {- find the specified remote type -} findType :: R.RemoteConfig -> Annex RemoteType findType config = maybe unspecified specified $ M.lookup typeKey config - where - unspecified = error "Specify the type of remote with type=" - specified s = case filter (findtype s) Remote.remoteTypes of - [] -> error $ "Unknown remote type " ++ s - (t:_) -> return t - findtype s i = R.typename i == s + where + unspecified = error "Specify the type of remote with type=" + specified s = case filter (findtype s) Remote.remoteTypes of + [] -> error $ "Unknown remote type " ++ s + (t:_) -> return t + findtype s i = R.typename i == s {- The name of a configured remote is stored in its config using this key. 
-} nameKey :: String diff --git a/Command/Log.hs b/Command/Log.hs index c3ce679..6608a99 100644 --- a/Command/Log.hs +++ b/Command/Log.hs @@ -47,9 +47,8 @@ passthruOptions = map odate ["since", "after", "until", "before"] ++ [ Option.field ['n'] "max-count" paramNumber "limit number of logs displayed" ] - where - odate n = Option.field [] n paramDate $ - "show log " ++ n ++ " date" + where + odate n = Option.field [] n paramDate $ "show log " ++ n ++ " date" gourceOption :: Option gourceOption = Option.flag [] "gource" "format output for gource" @@ -60,53 +59,53 @@ seek = [withValue Remote.uuidDescriptions $ \m -> withValue (concat <$> mapM getoption passthruOptions) $ \os -> withFlag gourceOption $ \gource -> withFilesInGit $ whenAnnexed $ start m zone os gource] - where - getoption o = maybe [] (use o) <$> - Annex.getField (Option.name o) - use o v = [Param ("--" ++ Option.name o), Param v] + where + getoption o = maybe [] (use o) <$> + Annex.getField (Option.name o) + use o v = [Param ("--" ++ Option.name o), Param v] start :: M.Map UUID String -> TimeZone -> [CommandParam] -> Bool -> FilePath -> (Key, Backend) -> CommandStart start m zone os gource file (key, _) = do showLog output =<< readLog <$> getLog key os -- getLog produces a zombie; reap it - liftIO Git.Command.reap + liftIO reapZombies stop - where - output - | gource = gourceOutput lookupdescription file - | otherwise = normalOutput lookupdescription file zone - lookupdescription u = fromMaybe (fromUUID u) $ M.lookup u m + where + output + | gource = gourceOutput lookupdescription file + | otherwise = normalOutput lookupdescription file zone + lookupdescription u = fromMaybe (fromUUID u) $ M.lookup u m showLog :: Outputter -> [RefChange] -> Annex () showLog outputter ps = do sets <- mapM (getset newref) ps previous <- maybe (return genesis) (getset oldref) (lastMaybe ps) sequence_ $ compareChanges outputter $ sets ++ [previous] - where - genesis = (0, S.empty) - getset select change = do - s <- S.fromList <$> get (select change) - return (changetime change, s) - get ref = map toUUID . Logs.Presence.getLog . L.unpack <$> - catObject ref + where + genesis = (0, S.empty) + getset select change = do + s <- S.fromList <$> get (select change) + return (changetime change, s) + get ref = map toUUID . Logs.Presence.getLog . L.unpack <$> + catObject ref normalOutput :: (UUID -> String) -> FilePath -> TimeZone -> Outputter normalOutput lookupdescription file zone present ts us = liftIO $ mapM_ (putStrLn . format) us - where - time = showTimeStamp zone ts - addel = if present then "+" else "-" - format u = unwords [ addel, time, file, "|", - fromUUID u ++ " -- " ++ lookupdescription u ] + where + time = showTimeStamp zone ts + addel = if present then "+" else "-" + format u = unwords [ addel, time, file, "|", + fromUUID u ++ " -- " ++ lookupdescription u ] gourceOutput :: (UUID -> String) -> FilePath -> Outputter gourceOutput lookupdescription file present ts us = liftIO $ mapM_ (putStrLn . intercalate "|" . format) us - where - time = takeWhile isDigit $ show ts - addel = if present then "A" else "M" - format u = [ time, lookupdescription u, addel, file ] + where + time = takeWhile isDigit $ show ts + addel = if present then "A" else "M" + format u = [ time, lookupdescription u, addel, file ] {- Generates a display of the changes (which are ordered with newest first), - by comparing each change with the previous change. @@ -114,12 +113,12 @@ gourceOutput lookupdescription file present ts us = - removed. 
-} compareChanges :: Ord a => (Bool -> POSIXTime -> [a] -> b) -> [(POSIXTime, S.Set a)] -> [b] compareChanges format changes = concatMap diff $ zip changes (drop 1 changes) - where - diff ((ts, new), (_, old)) = - [format True ts added, format False ts removed] - where - added = S.toList $ S.difference new old - removed = S.toList $ S.difference old new + where + diff ((ts, new), (_, old)) = + [format True ts added, format False ts removed] + where + added = S.toList $ S.difference new old + removed = S.toList $ S.difference old new {- Gets the git log for a given location log file. - @@ -148,22 +147,21 @@ getLog key os = do readLog :: [String] -> [RefChange] readLog = mapMaybe (parse . lines) - where - parse (ts:raw:[]) = let (old, new) = parseRaw raw in - Just RefChange - { changetime = parseTimeStamp ts - , oldref = old - , newref = new - } - parse _ = Nothing + where + parse (ts:raw:[]) = let (old, new) = parseRaw raw in + Just RefChange + { changetime = parseTimeStamp ts + , oldref = old + , newref = new + } + parse _ = Nothing -- Parses something like ":100644 100644 oldsha newsha M" parseRaw :: String -> (Git.Ref, Git.Ref) -parseRaw l = (Git.Ref oldsha, Git.Ref newsha) - where - ws = words l - oldsha = ws !! 2 - newsha = ws !! 3 +parseRaw l = go $ words l + where + go (_:_:oldsha:newsha:_) = (Git.Ref oldsha, Git.Ref newsha) + go _ = error $ "unable to parse git log output: " ++ l parseTimeStamp :: String -> POSIXTime parseTimeStamp = utcTimeToPOSIXSeconds . fromMaybe (error "bad timestamp") . diff --git a/Command/Map.hs b/Command/Map.hs index 3dbdadb..94b1289 100644 --- a/Command/Map.hs +++ b/Command/Map.hs @@ -63,14 +63,13 @@ start = do -} drawMap :: [Git.Repo] -> M.Map UUID String -> [UUID] -> String drawMap rs umap ts = Dot.graph $ repos ++ trusted ++ others - where - repos = map (node umap rs) rs - ruuids = ts ++ map getUncachedUUID rs - others = map (unreachable . uuidnode) $ - filter (`notElem` ruuids) (M.keys umap) - trusted = map (trustworthy . uuidnode) ts - uuidnode u = Dot.graphNode (fromUUID u) $ - M.findWithDefault "" u umap + where + repos = map (node umap rs) rs + ruuids = ts ++ map getUncachedUUID rs + others = map (unreachable . uuidnode) $ + filter (`notElem` ruuids) (M.keys umap) + trusted = map (trustworthy . uuidnode) ts + uuidnode u = Dot.graphNode (fromUUID u) $ M.findWithDefault "" u umap hostname :: Git.Repo -> String hostname r @@ -86,9 +85,9 @@ repoName :: M.Map UUID String -> Git.Repo -> String repoName umap r | repouuid == NoUUID = fallback | otherwise = M.findWithDefault fallback repouuid umap - where - repouuid = getUncachedUUID r - fallback = fromMaybe "unknown" $ Git.remoteName r + where + repouuid = getUncachedUUID r + fallback = fromMaybe "unknown" $ Git.remoteName r {- A unique id for the node for a repo. Uses the annex.uuid if available. -} nodeId :: Git.Repo -> String @@ -100,32 +99,32 @@ nodeId r = {- A node representing a repo. 
-} node :: M.Map UUID String -> [Git.Repo] -> Git.Repo -> String node umap fullinfo r = unlines $ n:edges - where - n = Dot.subGraph (hostname r) (basehostname r) "lightblue" $ - decorate $ Dot.graphNode (nodeId r) (repoName umap r) - edges = map (edge umap fullinfo r) (Git.remotes r) - decorate - | Git.config r == M.empty = unreachable - | otherwise = reachable + where + n = Dot.subGraph (hostname r) (basehostname r) "lightblue" $ + decorate $ Dot.graphNode (nodeId r) (repoName umap r) + edges = map (edge umap fullinfo r) (Git.remotes r) + decorate + | Git.config r == M.empty = unreachable + | otherwise = reachable {- An edge between two repos. The second repo is a remote of the first. -} edge :: M.Map UUID String -> [Git.Repo] -> Git.Repo -> Git.Repo -> String edge umap fullinfo from to = Dot.graphEdge (nodeId from) (nodeId fullto) edgename - where - -- get the full info for the remote, to get its UUID - fullto = findfullinfo to - findfullinfo n = - case filter (same n) fullinfo of - [] -> n - (n':_) -> n' - {- Only name an edge if the name is different than the name - - that will be used for the destination node, and is - - different from its hostname. (This reduces visual clutter.) -} - edgename = maybe Nothing calcname $ Git.remoteName to - calcname n - | n `elem` [repoName umap fullto, hostname fullto] = Nothing - | otherwise = Just n + where + -- get the full info for the remote, to get its UUID + fullto = findfullinfo to + findfullinfo n = + case filter (same n) fullinfo of + [] -> n + (n':_) -> n' + {- Only name an edge if the name is different than the name + - that will be used for the destination node, and is + - different from its hostname. (This reduces visual clutter.) -} + edgename = maybe Nothing calcname $ Git.remoteName to + calcname n + | n `elem` [repoName umap fullto, hostname fullto] = Nothing + | otherwise = Just n unreachable :: String -> String unreachable = Dot.fillColor "red" @@ -165,11 +164,10 @@ same a b | both Git.repoIsUrl && neither Git.repoIsSsh = matching show | neither Git.repoIsSsh = matching Git.repoPath | otherwise = False - - where - matching t = t a == t b - both t = t a && t b - neither t = not (t a) && not (t b) + where + matching t = t a == t b + both t = t a && t b + neither t = not (t a) && not (t b) {- reads the config of a remote, with progress display -} scan :: Git.Repo -> Annex Git.Repo @@ -192,50 +190,49 @@ tryScan r | Git.repoIsSsh r = sshscan | Git.repoIsUrl r = return Nothing | otherwise = safely $ Git.Config.read r - where - safely a = do - result <- liftIO (try a :: IO (Either SomeException Git.Repo)) - case result of - Left _ -> return Nothing - Right r' -> return $ Just r' - pipedconfig cmd params = safely $ - withHandle StdoutHandle createProcessSuccess p $ - Git.Config.hRead r - where - p = proc cmd $ toCommand params - - configlist = - onRemote r (pipedconfig, Nothing) "configlist" [] [] - manualconfiglist = do - sshparams <- sshToRepo r [Param sshcmd] - liftIO $ pipedconfig "ssh" sshparams - where - sshcmd = cddir ++ " && " ++ - "git config --null --list" - dir = Git.repoPath r - cddir - | "/~" `isPrefixOf` dir = - let (userhome, reldir) = span (/= '/') (drop 1 dir) - in "cd " ++ userhome ++ " && cd " ++ shellEscape (drop 1 reldir) - | otherwise = "cd " ++ shellEscape dir - - -- First, try sshing and running git config manually, - -- only fall back to git-annex-shell configlist if that - -- fails. - -- - -- This is done for two reasons, first I'd like this - -- subcommand to be usable on non-git-annex repos. 
- -- Secondly, configlist doesn't include information about - -- the remote's remotes. - sshscan = do - sshnote - v <- manualconfiglist - case v of - Nothing -> do - sshnote - configlist - ok -> return ok - - sshnote = do - showAction "sshing" - showOutput + where + safely a = do + result <- liftIO (try a :: IO (Either SomeException Git.Repo)) + case result of + Left _ -> return Nothing + Right r' -> return $ Just r' + pipedconfig cmd params = safely $ + withHandle StdoutHandle createProcessSuccess p $ + Git.Config.hRead r + where + p = proc cmd $ toCommand params + + configlist = onRemote r (pipedconfig, Nothing) "configlist" [] [] + manualconfiglist = do + sshparams <- sshToRepo r [Param sshcmd] + liftIO $ pipedconfig "ssh" sshparams + where + sshcmd = cddir ++ " && " ++ + "git config --null --list" + dir = Git.repoPath r + cddir + | "/~" `isPrefixOf` dir = + let (userhome, reldir) = span (/= '/') (drop 1 dir) + in "cd " ++ userhome ++ " && cd " ++ shellEscape (drop 1 reldir) + | otherwise = "cd " ++ shellEscape dir + + -- First, try sshing and running git config manually, + -- only fall back to git-annex-shell configlist if that + -- fails. + -- + -- This is done for two reasons, first I'd like this + -- subcommand to be usable on non-git-annex repos. + -- Secondly, configlist doesn't include information about + -- the remote's remotes. + sshscan = do + sshnote + v <- manualconfiglist + case v of + Nothing -> do + sshnote + configlist + ok -> return ok + + sshnote = do + showAction "sshing" + showOutput diff --git a/Command/Migrate.hs b/Command/Migrate.hs index d3b29ee..0b23c2a 100644 --- a/Command/Migrate.hs +++ b/Command/Migrate.hs @@ -31,9 +31,9 @@ start file (key, oldbackend) = do showStart "migrate" file next $ perform file key oldbackend newbackend else stop - where - choosebackend Nothing = Prelude.head <$> orderedList - choosebackend (Just backend) = return backend + where + choosebackend Nothing = Prelude.head <$> orderedList + choosebackend (Just backend) = return backend {- Checks if a key is upgradable to a newer representation. -} {- Ideally, all keys have file size metadata. Old keys may not. 
-} @@ -49,10 +49,10 @@ perform file oldkey oldbackend newbackend = do ( maybe stop go =<< genkey , stop ) - where - go newkey = stopUnless (Command.ReKey.linkKey oldkey newkey) $ - next $ Command.ReKey.cleanup file oldkey newkey - genkey = do - content <- inRepo $ gitAnnexLocation oldkey - let source = KeySource { keyFilename = file, contentLocation = content } - liftM fst <$> genKey source (Just newbackend) + where + go newkey = stopUnless (Command.ReKey.linkKey oldkey newkey) $ + next $ Command.ReKey.cleanup file oldkey newkey + genkey = do + content <- inRepo $ gitAnnexLocation oldkey + let source = KeySource { keyFilename = file, contentLocation = content } + liftM fst <$> genKey source (Just newbackend) diff --git a/Command/Move.hs b/Command/Move.hs index 41daab4..316e419 100644 --- a/Command/Move.hs +++ b/Command/Move.hs @@ -44,9 +44,9 @@ start to from move file (key, _) = do (Nothing, Just dest) -> toStart dest move file key (Just src, Nothing) -> fromStart src move file key (_ , _) -> error "only one of --from or --to can be specified" - where - noAuto = when move $ whenM (Annex.getState Annex.auto) $ error - "--auto is not supported for move" + where + noAuto = when move $ whenM (Annex.getState Annex.auto) $ error + "--auto is not supported for move" showMoveAction :: Bool -> FilePath -> Annex () showMoveAction True file = showStart "move" file @@ -98,15 +98,15 @@ toPerform dest move key file = moveLock move key $ do warning "This could have failed because --fast is enabled." stop Right True -> finish False - where - finish remotechanged = do - when remotechanged $ - Remote.logStatus dest key InfoPresent - if move - then do - whenM (inAnnex key) $ removeAnnex key - next $ Command.Drop.cleanupLocal key - else next $ return True + where + finish remotechanged = do + when remotechanged $ + Remote.logStatus dest key InfoPresent + if move + then do + whenM (inAnnex key) $ removeAnnex key + next $ Command.Drop.cleanupLocal key + else next $ return True {- Moves (or copies) the content of an annexed file from a remote - to the current repository. 
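
The finish step in toPerform above comes down to one small decision: after a successful transfer the remote's location log is updated, and only a move (as opposed to a copy) then drops the local content; the fromPerform hunk below applies the mirror-image rule for the --from direction. The following is a minimal, self-contained sketch of the --to direction only, using toy stand-in types (Repo, Presence) instead of git-annex's real Annex machinery.

-- Toy model of the move/copy --to bookkeeping: log presence on the remote,
-- then drop local content only when moving. All types here are stand-ins.
data Presence = InfoPresent | InfoMissing
    deriving Show

data Repo = Repo
    { localContent :: Bool   -- do we still hold the object locally?
    , remoteLog :: Presence  -- what the location log records for the remote
    } deriving Show

-- move: True for "git annex move", False for "git annex copy"
-- remotechanged: True when the transfer actually placed content on the remote
finish :: Bool -> Bool -> Repo -> Repo
finish move remotechanged = dropLocal . logRemote
  where
    logRemote r
        | remotechanged = r { remoteLog = InfoPresent }
        | otherwise = r
    dropLocal r
        | move = r { localContent = False }
        | otherwise = r

main :: IO ()
main = do
    print $ finish True True (Repo True InfoMissing)   -- move: local copy dropped
    print $ finish False True (Repo True InfoMissing)  -- copy: local copy kept
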
@@ -118,35 +118,37 @@ fromStart :: Remote -> Bool -> FilePath -> Key -> CommandStart fromStart src move file key | move = go | otherwise = stopUnless (not <$> inAnnex key) go - where - go = stopUnless (fromOk src key) $ do - showMoveAction move file - next $ fromPerform src move key file + where + go = stopUnless (fromOk src key) $ do + showMoveAction move file + next $ fromPerform src move key file + fromOk :: Remote -> Key -> Annex Bool fromOk src key | Remote.hasKeyCheap src = either (const expensive) return =<< Remote.hasKey src key | otherwise = expensive - where - expensive = do - u <- getUUID - remotes <- Remote.keyPossibilities key - return $ u /= Remote.uuid src && elem src remotes + where + expensive = do + u <- getUUID + remotes <- Remote.keyPossibilities key + return $ u /= Remote.uuid src && elem src remotes + fromPerform :: Remote -> Bool -> Key -> FilePath -> CommandPerform fromPerform src move key file = moveLock move key $ ifM (inAnnex key) ( handle move True , handle move =<< go ) - where - go = download (Remote.uuid src) key (Just file) noRetry $ do - showAction $ "from " ++ Remote.name src - getViaTmp key $ Remote.retrieveKeyFile src key (Just file) - handle _ False = stop -- failed - handle False True = next $ return True -- copy complete - handle True True = do -- finish moving - ok <- Remote.removeKey src key - next $ Command.Drop.cleanupRemote key src ok + where + go = download (Remote.uuid src) key (Just file) noRetry $ do + showAction $ "from " ++ Remote.name src + getViaTmp key $ Remote.retrieveKeyFile src key (Just file) + handle _ False = stop -- failed + handle False True = next $ return True -- copy complete + handle True True = do -- finish moving + ok <- Remote.removeKey src key + next $ Command.Drop.cleanupRemote key src ok {- Locks a key in order for it to be moved. - No lock is needed when a key is being copied. -} diff --git a/Command/ReKey.hs b/Command/ReKey.hs index 5bd419c..ea06873 100644 --- a/Command/ReKey.hs +++ b/Command/ReKey.hs @@ -25,13 +25,13 @@ seek = [withPairs start] start :: (FilePath, String) -> CommandStart start (file, keyname) = ifAnnexed file go stop - where - newkey = fromMaybe (error "bad key") $ file2key keyname - go (oldkey, _) - | oldkey == newkey = stop - | otherwise = do - showStart "rekey" file - next $ perform file oldkey newkey + where + newkey = fromMaybe (error "bad key") $ file2key keyname + go (oldkey, _) + | oldkey == newkey = stop + | otherwise = do + showStart "rekey" file + next $ perform file oldkey newkey perform :: FilePath -> Key -> Key -> CommandPerform perform file oldkey newkey = do diff --git a/Command/Reinject.hs b/Command/Reinject.hs index 112b7fa..d346925 100644 --- a/Command/Reinject.hs +++ b/Command/Reinject.hs @@ -27,10 +27,10 @@ start (src:dest:[]) ifAnnexed src (error $ "cannot used annexed file as src: " ++ src) go - where - go = do - showStart "reinject" dest - next $ whenAnnexed (perform src) dest + where + go = do + showStart "reinject" dest + next $ whenAnnexed (perform src) dest start _ = error "specify a src file and a dest file" perform :: FilePath -> FilePath -> (Key, Backend) -> CommandPerform @@ -43,14 +43,14 @@ perform src _dest (key, backend) = do next $ cleanup key , error "not reinjecting" ) - where - -- the file might be on a different filesystem, - -- so mv is used rather than simply calling - -- moveToObjectDir; disk space is also - -- checked this way. - move = getViaTmp key $ \tmp -> - liftIO $ boolSystem "mv" [File src, File tmp] - reject = const $ return "wrong file?" 
+ where + -- the file might be on a different filesystem, + -- so mv is used rather than simply calling + -- moveToObjectDir; disk space is also + -- checked this way. + move = getViaTmp key $ \tmp -> + liftIO $ boolSystem "mv" [File src, File tmp] + reject = const $ return "wrong file?" cleanup :: Key -> CommandCleanup cleanup key = do diff --git a/Command/Status.hs b/Command/Status.hs index ab7dbb0..593e8a0 100644 --- a/Command/Status.hs +++ b/Command/Status.hs @@ -5,11 +5,11 @@ - Licensed under the GNU GPL version 3 or higher. -} -{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE PackageImports, BangPatterns #-} module Command.Status where -import Control.Monad.State.Strict +import "mtl" Control.Monad.State.Strict import qualified Data.Map as M import Text.JSON import Data.Tuple @@ -114,10 +114,10 @@ nojson a _ = a showStat :: Stat -> StatState () showStat s = maybe noop calc =<< s - where - calc (desc, a) = do - (lift . showHeader) desc - lift . showRaw =<< a + where + calc (desc, a) = do + (lift . showHeader) desc + lift . showRaw =<< a supported_backends :: Stat supported_backends = stat "supported backends" $ json unwords $ @@ -133,8 +133,8 @@ remote_list level = stat n $ nojson $ lift $ do rs <- fst <$> trustPartition level us s <- prettyPrintUUIDs n rs return $ if null s then "0" else show (length rs) ++ "\n" ++ beginning s - where - n = showTrustLevel level ++ " repositories" + where + n = showTrustLevel level ++ " repositories" local_annex_size :: Stat local_annex_size = stat "local annex size" $ json id $ @@ -182,42 +182,42 @@ transfer_list = stat "transfers in progress" $ nojson $ lift $ do then return "none" else return $ multiLine $ map (\(t, i) -> line uuidmap t i) $ sort ts - where - line uuidmap t i = unwords - [ showLcDirection (transferDirection t) ++ "ing" - , fromMaybe (key2file $ transferKey t) (associatedFile i) - , if transferDirection t == Upload then "to" else "from" - , maybe (fromUUID $ transferUUID t) Remote.name $ - M.lookup (transferUUID t) uuidmap - ] + where + line uuidmap t i = unwords + [ showLcDirection (transferDirection t) ++ "ing" + , fromMaybe (key2file $ transferKey t) (associatedFile i) + , if transferDirection t == Upload then "to" else "from" + , maybe (fromUUID $ transferUUID t) Remote.name $ + M.lookup (transferUUID t) uuidmap + ] disk_size :: Stat disk_size = stat "available local disk space" $ json id $ lift $ calcfree <$> getDiskReserve <*> inRepo (getDiskFree . 
gitAnnexDir) - where - calcfree reserve (Just have) = unwords - [ roughSize storageUnits False $ nonneg $ have - reserve - , "(+" ++ roughSize storageUnits False reserve - , "reserved)" - ] - - calcfree _ _ = "unknown" - nonneg x - | x >= 0 = x - | otherwise = 0 + where + calcfree reserve (Just have) = unwords + [ roughSize storageUnits False $ nonneg $ have - reserve + , "(+" ++ roughSize storageUnits False reserve + , "reserved)" + ] + calcfree _ _ = "unknown" + + nonneg x + | x >= 0 = x + | otherwise = 0 backend_usage :: Stat backend_usage = stat "backend usage" $ nojson $ calc <$> (backendsKeys <$> cachedReferencedData) <*> (backendsKeys <$> cachedPresentData) - where - calc x y = multiLine $ - map (\(n, b) -> b ++ ": " ++ show n) $ - reverse $ sort $ map swap $ M.toList $ - M.unionWith (+) x y + where + calc x y = multiLine $ + map (\(n, b) -> b ++ ": " ++ show n) $ + reverse $ sort $ map swap $ M.toList $ + M.unionWith (+) x y cachedPresentData :: StatState KeyData cachedPresentData = do @@ -249,39 +249,38 @@ foldKeys = foldl' (flip addKey) emptyKeyData addKey :: Key -> KeyData -> KeyData addKey key (KeyData count size unknownsize backends) = KeyData count' size' unknownsize' backends' - where - {- All calculations strict to avoid thunks when repeatedly - - applied to many keys. -} - !count' = count + 1 - !backends' = M.insertWith' (+) (keyBackendName key) 1 backends - !size' = maybe size (+ size) ks - !unknownsize' = maybe (unknownsize + 1) (const unknownsize) ks - ks = keySize key + where + {- All calculations strict to avoid thunks when repeatedly + - applied to many keys. -} + !count' = count + 1 + !backends' = M.insertWith' (+) (keyBackendName key) 1 backends + !size' = maybe size (+ size) ks + !unknownsize' = maybe (unknownsize + 1) (const unknownsize) ks + ks = keySize key showSizeKeys :: KeyData -> String showSizeKeys d = total ++ missingnote - where - total = roughSize storageUnits False $ sizeKeys d - missingnote - | unknownSizeKeys d == 0 = "" - | otherwise = aside $ - "+ " ++ show (unknownSizeKeys d) ++ - " keys of unknown size" + where + total = roughSize storageUnits False $ sizeKeys d + missingnote + | unknownSizeKeys d == 0 = "" + | otherwise = aside $ + "+ " ++ show (unknownSizeKeys d) ++ + " keys of unknown size" staleSize :: String -> (Git.Repo -> FilePath) -> Stat staleSize label dirspec = go =<< lift (Command.Unused.staleKeys dirspec) - where - go [] = nostat - go keys = onsize =<< sum <$> keysizes keys - onsize 0 = nostat - onsize size = stat label $ - json (++ aside "clean up with git-annex unused") $ - return $ roughSize storageUnits False size - keysizes keys = map (fromIntegral . fileSize) <$> stats keys - stats keys = do - dir <- lift $ fromRepo dirspec - liftIO $ forM keys $ \k -> - getFileStatus (dir keyFile k) + where + go [] = nostat + go keys = onsize =<< sum <$> keysizes keys + onsize 0 = nostat + onsize size = stat label $ + json (++ aside "clean up with git-annex unused") $ + return $ roughSize storageUnits False size + keysizes keys = map (fromIntegral . 
fileSize) <$> stats keys + stats keys = do + dir <- lift $ fromRepo dirspec + liftIO $ forM keys $ \k -> getFileStatus (dir keyFile k) aside :: String -> String aside s = " (" ++ s ++ ")" diff --git a/Command/Sync.hs b/Command/Sync.hs index 1795a61..f741011 100644 --- a/Command/Sync.hs +++ b/Command/Sync.hs @@ -48,8 +48,8 @@ seek rs = do , [ pushLocal branch ] , [ pushRemote remote branch | remote <- remotes ] ] - where - nobranch = error "no branch is checked out" + where + nobranch = error "no branch is checked out" syncBranch :: Git.Ref -> Git.Ref syncBranch = Git.Ref.under "refs/heads/synced/" @@ -59,23 +59,23 @@ remoteBranch remote = Git.Ref.under $ "refs/remotes/" ++ Remote.name remote syncRemotes :: [String] -> Annex [Remote] syncRemotes rs = ifM (Annex.getState Annex.fast) ( nub <$> pickfast , wanted ) - where - pickfast = (++) <$> listed <*> (good =<< fastest <$> available) - wanted - | null rs = good =<< concat . Remote.byCost <$> available - | otherwise = listed - listed = do - l <- catMaybes <$> mapM (Remote.byName . Just) rs - let s = filter Remote.specialRemote l - unless (null s) $ - error $ "cannot sync special remotes: " ++ - unwords (map Types.Remote.name s) - return l - available = filter (not . Remote.specialRemote) - <$> (filterM (repoSyncable . Types.Remote.repo) - =<< Remote.enabledRemoteList) - good = filterM $ Remote.Git.repoAvail . Types.Remote.repo - fastest = fromMaybe [] . headMaybe . Remote.byCost + where + pickfast = (++) <$> listed <*> (good =<< fastest <$> available) + wanted + | null rs = good =<< concat . Remote.byCost <$> available + | otherwise = listed + listed = do + l <- catMaybes <$> mapM (Remote.byName . Just) rs + let s = filter Remote.specialRemote l + unless (null s) $ + error $ "cannot sync special remotes: " ++ + unwords (map Types.Remote.name s) + return l + available = filter (not . Remote.specialRemote) + <$> (filterM (repoSyncable . Types.Remote.repo) + =<< Remote.enabledRemoteList) + good = filterM $ Remote.Git.repoAvail . Types.Remote.repo + fastest = fromMaybe [] . headMaybe . 
Remote.byCost commit :: CommandStart commit = do @@ -90,16 +90,16 @@ commit = do mergeLocal :: Git.Ref -> CommandStart mergeLocal branch = go =<< needmerge - where - syncbranch = syncBranch branch - needmerge = do - unlessM (inRepo $ Git.Ref.exists syncbranch) $ - inRepo $ updateBranch syncbranch - inRepo $ Git.Branch.changed branch syncbranch - go False = stop - go True = do - showStart "merge" $ Git.Ref.describe syncbranch - next $ next $ mergeFrom syncbranch + where + syncbranch = syncBranch branch + needmerge = do + unlessM (inRepo $ Git.Ref.exists syncbranch) $ + inRepo $ updateBranch syncbranch + inRepo $ Git.Branch.changed branch syncbranch + go False = stop + go True = do + showStart "merge" $ Git.Ref.describe syncbranch + next $ next $ mergeFrom syncbranch pushLocal :: Git.Ref -> CommandStart pushLocal branch = do @@ -109,11 +109,11 @@ pushLocal branch = do updateBranch :: Git.Ref -> Git.Repo -> IO () updateBranch syncbranch g = unlessM go $ error $ "failed to update " ++ show syncbranch - where - go = Git.Command.runBool "branch" - [ Param "-f" - , Param $ show $ Git.Ref.base syncbranch - ] g + where + go = Git.Command.runBool "branch" + [ Param "-f" + , Param $ show $ Git.Ref.base syncbranch + ] g pullRemote :: Remote -> Git.Ref -> CommandStart pullRemote remote branch = do @@ -122,9 +122,9 @@ pullRemote remote branch = do showOutput stopUnless fetch $ next $ mergeRemote remote (Just branch) - where - fetch = inRepo $ Git.Command.runBool "fetch" - [Param $ Remote.name remote] + where + fetch = inRepo $ Git.Command.runBool "fetch" + [Param $ Remote.name remote] {- The remote probably has both a master and a synced/master branch. - Which to merge from? Well, the master has whatever latest changes @@ -136,22 +136,22 @@ mergeRemote remote b = case b of branch <- inRepo Git.Branch.currentUnsafe all id <$> (mapM merge $ branchlist branch) Just _ -> all id <$> (mapM merge =<< tomerge (branchlist b)) - where - merge = mergeFrom . remoteBranch remote - tomerge branches = filterM (changed remote) branches - branchlist Nothing = [] - branchlist (Just branch) = [branch, syncBranch branch] + where + merge = mergeFrom . 
remoteBranch remote + tomerge branches = filterM (changed remote) branches + branchlist Nothing = [] + branchlist (Just branch) = [branch, syncBranch branch] pushRemote :: Remote -> Git.Ref -> CommandStart pushRemote remote branch = go =<< needpush - where - needpush = anyM (newer remote) [syncBranch branch, Annex.Branch.name] - go False = stop - go True = do - showStart "push" (Remote.name remote) - next $ next $ do - showOutput - inRepo $ pushBranch remote branch + where + needpush = anyM (newer remote) [syncBranch branch, Annex.Branch.name] + go False = stop + go True = do + showStart "push" (Remote.name remote) + next $ next $ do + showOutput + inRepo $ pushBranch remote branch pushBranch :: Remote -> Git.Ref -> Git.Repo -> IO Bool pushBranch remote branch g = @@ -160,12 +160,12 @@ pushBranch remote branch g = , Param $ refspec Annex.Branch.name , Param $ refspec branch ] g - where - refspec b = concat - [ show $ Git.Ref.base b - , ":" - , show $ Git.Ref.base $ syncBranch b - ] + where + refspec b = concat + [ show $ Git.Ref.base b + , ":" + , show $ Git.Ref.base $ syncBranch b + ] mergeAnnex :: CommandStart mergeAnnex = do @@ -213,37 +213,37 @@ resolveMerge' u withKey LsFiles.valUs $ \keyUs -> withKey LsFiles.valThem $ \keyThem -> go keyUs keyThem | otherwise = return False - where - go keyUs keyThem - | keyUs == keyThem = do - makelink keyUs - return True - | otherwise = do - liftIO $ nukeFile file - Annex.Queue.addCommand "rm" [Params "--quiet -f --"] [file] - makelink keyUs - makelink keyThem - return True - file = LsFiles.unmergedFile u - issymlink select = any (select (LsFiles.unmergedBlobType u) ==) - [Just SymlinkBlob, Nothing] - makelink (Just key) = do - let dest = mergeFile file key - l <- calcGitLink dest key - liftIO $ do - nukeFile dest - createSymbolicLink l dest - Annex.Queue.addCommand "add" [Param "--force", Param "--"] [dest] - makelink _ = noop - withKey select a = do - let msha = select $ LsFiles.unmergedSha u - case msha of - Nothing -> a Nothing - Just sha -> do - key <- fileKey . takeFileName - . encodeW8 . L.unpack - <$> catObject sha - maybe (return False) (a . Just) key + where + go keyUs keyThem + | keyUs == keyThem = do + makelink keyUs + return True + | otherwise = do + liftIO $ nukeFile file + Annex.Queue.addCommand "rm" [Params "--quiet -f --"] [file] + makelink keyUs + makelink keyThem + return True + file = LsFiles.unmergedFile u + issymlink select = any (select (LsFiles.unmergedBlobType u) ==) + [Just SymlinkBlob, Nothing] + makelink (Just key) = do + let dest = mergeFile file key + l <- calcGitLink dest key + liftIO $ do + nukeFile dest + createSymbolicLink l dest + Annex.Queue.addCommand "add" [Param "--force", Param "--"] [dest] + makelink _ = noop + withKey select a = do + let msha = select $ LsFiles.unmergedSha u + case msha of + Nothing -> a Nothing + Just sha -> do + key <- fileKey . takeFileName + . encodeW8 . L.unpack + <$> catObject sha + maybe (return False) (a . Just) key {- The filename to use when resolving a conflicted merge of a file, - that points to a key. 
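
The conflicted-merge handling above keeps both sides of a conflicted annexed file by writing each one to a "variant" file whose name is derived from its key; the mergeFile hunk just below implements the naming, with shortHash taking a 4-character prefix of an MD5 digest of the key. Here is a minimal, self-contained sketch of that naming scheme; shortHashOf is a toy stand-in for the real MD5 prefix, and the example path and key are made up.

-- Sketch of the <base>.variant-<short key hash><ext> naming used when
-- resolving a conflicted merge of an annexed file. shortHashOf is a toy
-- stand-in; the real shortHash takes a 4-character prefix of an MD5 digest.
import Data.Char (ord)
import Numeric (showHex)
import System.FilePath (dropExtension, takeDirectory, takeExtension, takeFileName, (</>))

shortHashOf :: String -> String
shortHashOf = take 4 . concatMap (flip showHex "" . ord)

variantFile :: FilePath -> String -> FilePath
variantFile file key =
    takeDirectory file
        </> dropExtension (takeFileName file)
        ++ ".variant-" ++ shortHashOf key
        ++ takeExtension file

main :: IO ()
main = putStrLn $ variantFile "docs/foo.mp3" "SHA256-s100--abc123"
-- prints something like docs/foo.variant-5348.mp3

Because the extension is preserved, editors and other tools still recognize the file, and both variants can sit side by side in the tree until the user picks one.
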
@@ -262,13 +262,13 @@ mergeFile :: FilePath -> Key -> FilePath mergeFile file key | doubleconflict = go $ key2file key | otherwise = go $ shortHash $ key2file key - where - varmarker = ".variant-" - doubleconflict = varmarker `isSuffixOf` (dropExtension file) - go v = takeDirectory file - dropExtension (takeFileName file) - ++ varmarker ++ v - ++ takeExtension file + where + varmarker = ".variant-" + doubleconflict = varmarker `isSuffixOf` (dropExtension file) + go v = takeDirectory file + dropExtension (takeFileName file) + ++ varmarker ++ v + ++ takeExtension file shortHash :: String -> String shortHash = take 4 . md5s . md5FilePath diff --git a/Command/Uninit.hs b/Command/Uninit.hs index 6ac3e12..b365e8c 100644 --- a/Command/Uninit.hs +++ b/Command/Uninit.hs @@ -30,13 +30,26 @@ check = do cwd <- liftIO getCurrentDirectory whenM ((/=) <$> liftIO (absPath top) <*> liftIO (absPath cwd)) $ error "can only run uninit from the top of the git repository" - where - current_branch = Git.Ref . Prelude.head . lines <$> revhead - revhead = inRepo $ Git.Command.pipeReadStrict - [Params "rev-parse --abbrev-ref HEAD"] + where + current_branch = Git.Ref . Prelude.head . lines <$> revhead + revhead = inRepo $ Git.Command.pipeReadStrict + [Params "rev-parse --abbrev-ref HEAD"] seek :: [CommandSeek] -seek = [withFilesInGit $ whenAnnexed startUnannex, withNothing start] +seek = [ + withFilesNotInGit $ whenAnnexed startCheckIncomplete, + withFilesInGit $ whenAnnexed startUnannex + , withNothing start + ] + +{- git annex symlinks that are not checked into git could be left by an + - interrupted add. -} +startCheckIncomplete :: FilePath -> (Key, Backend) -> CommandStart +startCheckIncomplete file _ = error $ unlines + [ file ++ " points to annexed content, but is not checked into git." + , "Perhaps this was left behind by an interrupted git annex add?" + , "Not continuing with uninit; either delete or git annex add the file and retry." 
+ ] startUnannex :: FilePath -> (Key, Backend) -> CommandStart startUnannex file info = do @@ -47,13 +60,7 @@ startUnannex file info = do Command.Unannex.start file info start :: CommandStart -start = next perform - -perform :: CommandPerform -perform = next cleanup - -cleanup :: CommandCleanup -cleanup = do +start = next $ next $ do annexdir <- fromRepo gitAnnexDir uninitialize mapM_ removeAnnex =<< getKeysPresent diff --git a/Command/Unlock.hs b/Command/Unlock.hs index f3ffd31..6489fc3 100644 --- a/Command/Unlock.hs +++ b/Command/Unlock.hs @@ -17,8 +17,8 @@ def = [ c "unlock" "unlock files for modification" , c "edit" "same as unlock" ] - where - c n = command n paramPaths seek + where + c n = command n paramPaths seek seek :: [CommandSeek] seek = [withFilesInGit $ whenAnnexed start] diff --git a/Command/Unused.hs b/Command/Unused.hs index 79285f7..c0551dd 100644 --- a/Command/Unused.hs +++ b/Command/Unused.hs @@ -64,27 +64,26 @@ checkUnused = chain 0 , check "bad" staleBadMsg $ staleKeysPrune gitAnnexBadDir , check "tmp" staleTmpMsg $ staleKeysPrune gitAnnexTmpDir ] - where - findunused True = do - showNote "fast mode enabled; only finding stale files" - return [] - findunused False = do - showAction "checking for unused data" - excludeReferenced =<< getKeysPresent - chain _ [] = next $ return True - chain v (a:as) = do - v' <- a v - chain v' as + where + findunused True = do + showNote "fast mode enabled; only finding stale files" + return [] + findunused False = do + showAction "checking for unused data" + excludeReferenced =<< getKeysPresent + chain _ [] = next $ return True + chain v (a:as) = do + v' <- a v + chain v' as checkRemoteUnused :: String -> CommandPerform checkRemoteUnused name = go =<< fromJust <$> Remote.byName (Just name) - where - go r = do - showAction "checking for unused data" - _ <- check "" (remoteUnusedMsg r) (remoteunused r) 0 - next $ return True - remoteunused r = - excludeReferenced <=< loggedKeysFor $ Remote.uuid r + where + go r = do + showAction "checking for unused data" + _ <- check "" (remoteUnusedMsg r) (remoteunused r) 0 + next $ return True + remoteunused r = excludeReferenced <=< loggedKeysFor $ Remote.uuid r check :: FilePath -> ([(Int, Key)] -> String) -> Annex [Key] -> Int -> Annex Int check file msg a c = do @@ -100,9 +99,9 @@ number n (x:xs) = (n+1, x) : number (n+1) xs table :: [(Int, Key)] -> [String] table l = " NUMBER KEY" : map cols l - where - cols (n,k) = " " ++ pad 6 (show n) ++ " " ++ key2file k - pad n s = s ++ replicate (n - length s) ' ' + where + cols (n,k) = " " ++ pad 6 (show n) ++ " " ++ key2file k + pad n s = s ++ replicate (n - length s) ' ' staleTmpMsg :: [(Int, Key)] -> String staleTmpMsg t = unlines $ @@ -129,8 +128,8 @@ remoteUnusedMsg :: Remote -> [(Int, Key)] -> String remoteUnusedMsg r u = unusedMsg' u ["Some annexed data on " ++ name ++ " is not used by any files:"] [dropMsg $ Just r] - where - name = Remote.name r + where + name = Remote.name r dropMsg :: Maybe Remote -> String dropMsg Nothing = dropMsg' "" @@ -159,11 +158,11 @@ dropMsg' s = "\nTo remove unwanted data: git-annex dropunused" ++ s ++ " NUMBER\ -} excludeReferenced :: [Key] -> Annex [Key] excludeReferenced ks = runfilter firstlevel ks >>= runfilter secondlevel - where - runfilter _ [] = return [] -- optimisation - runfilter a l = bloomFilter show l <$> genBloomFilter show a - firstlevel = withKeysReferencedM - secondlevel = withKeysReferencedInGit + where + runfilter _ [] = return [] -- optimisation + runfilter a l = bloomFilter show l <$> 
genBloomFilter show a + firstlevel = withKeysReferencedM + secondlevel = withKeysReferencedInGit {- Finds items in the first, smaller list, that are not - present in the second, larger list. @@ -174,8 +173,8 @@ excludeReferenced ks = runfilter firstlevel ks >>= runfilter secondlevel exclude :: Ord a => [a] -> [a] -> [a] exclude [] _ = [] -- optimisation exclude smaller larger = S.toList $ remove larger $ S.fromList smaller - where - remove a b = foldl (flip S.delete) b a + where + remove a b = foldl (flip S.delete) b a {- A bloom filter capable of holding half a million keys with a - false positive rate of 1 in 1000 uses around 8 mb of memory, @@ -208,8 +207,8 @@ genBloomFilter convert populate = do bloom <- lift $ newMB (cheapHashes numhashes) numbits _ <- populate $ \v -> lift $ insertMB bloom (convert v) lift $ unsafeFreezeMB bloom - where - lift = liftIO . stToIO + where + lift = liftIO . stToIO bloomFilter :: Hashable t => (v -> t) -> [v] -> Bloom t -> [v] bloomFilter convert l bloom = filter (\k -> convert k `notElemB` bloom) l @@ -218,14 +217,14 @@ bloomFilter convert l bloom = filter (\k -> convert k `notElemB` bloom) l - symlinks in the git repo. -} withKeysReferenced :: v -> (Key -> v -> v) -> Annex v withKeysReferenced initial a = withKeysReferenced' initial folda - where - folda k v = return $ a k v + where + folda k v = return $ a k v {- Runs an action on each referenced key in the git repo. -} withKeysReferencedM :: (Key -> Annex ()) -> Annex () withKeysReferencedM a = withKeysReferenced' () calla - where - calla k _ = a k + where + calla k _ = a k withKeysReferenced' :: v -> (Key -> v -> Annex v) -> Annex v withKeysReferenced' initial a = do @@ -233,54 +232,53 @@ withKeysReferenced' initial a = do r <- go initial files liftIO $ void clean return r - where - getfiles = ifM isBareRepo - ( return ([], return True) - , do - top <- fromRepo Git.repoPath - inRepo $ LsFiles.inRepo [top] - ) - go v [] = return v - go v (f:fs) = do - x <- Backend.lookupFile f - case x of - Nothing -> go v fs - Just (k, _) -> do - !v' <- a k v - go v' fs - + where + getfiles = ifM isBareRepo + ( return ([], return True) + , do + top <- fromRepo Git.repoPath + inRepo $ LsFiles.inRepo [top] + ) + go v [] = return v + go v (f:fs) = do + x <- Backend.lookupFile f + case x of + Nothing -> go v fs + Just (k, _) -> do + !v' <- a k v + go v' fs withKeysReferencedInGit :: (Key -> Annex ()) -> Annex () withKeysReferencedInGit a = do rs <- relevantrefs <$> showref forM_ rs (withKeysReferencedInGitRef a) - where - showref = inRepo $ Git.Command.pipeReadStrict [Param "show-ref"] - relevantrefs = map (Git.Ref . snd) . - nubBy uniqref . - filter ourbranches . - map (separate (== ' ')) . lines - uniqref (x, _) (y, _) = x == y - ourbranchend = '/' : show Annex.Branch.name - ourbranches (_, b) = not (ourbranchend `isSuffixOf` b) - && not ("refs/synced/" `isPrefixOf` b) + where + showref = inRepo $ Git.Command.pipeReadStrict [Param "show-ref"] + relevantrefs = map (Git.Ref . snd) . + nubBy uniqref . + filter ourbranches . + map (separate (== ' ')) . lines + uniqref (x, _) (y, _) = x == y + ourbranchend = '/' : show Annex.Branch.name + ourbranches (_, b) = not (ourbranchend `isSuffixOf` b) + && not ("refs/synced/" `isPrefixOf` b) withKeysReferencedInGitRef :: (Key -> Annex ()) -> Git.Ref -> Annex () withKeysReferencedInGitRef a ref = do showAction $ "checking " ++ Git.Ref.describe ref go <=< inRepo $ LsTree.lsTree ref - where - go [] = noop - go (l:ls) - | isSymLink (LsTree.mode l) = do - content <- encodeW8 . 
L.unpack - <$> catFile ref (LsTree.file l) - case fileKey (takeFileName content) of - Nothing -> go ls - Just k -> do - a k - go ls - | otherwise = go ls + where + go [] = noop + go (l:ls) + | isSymLink (LsTree.mode l) = do + content <- encodeW8 . L.unpack + <$> catFile ref (LsTree.file l) + case fileKey (takeFileName content) of + Nothing -> go ls + Just k -> do + a k + go ls + | otherwise = go ls {- Looks in the specified directory for bad/tmp keys, and returns a list - of those that might still have value, or might be stale and removable. diff --git a/Command/Version.hs b/Command/Version.hs index 4cc5cb4..907811e 100644 --- a/Command/Version.hs +++ b/Command/Version.hs @@ -29,8 +29,8 @@ start = do putStrLn $ "supported repository versions: " ++ vs supportedVersions putStrLn $ "upgrade supported from repository versions: " ++ vs upgradableVersions stop - where - vs = join " " + where + vs = join " " showPackageVersion :: IO () showPackageVersion = putStrLn $ "git-annex version: " ++ SysConfig.packageversion diff --git a/Command/Vicfg.hs b/Command/Vicfg.hs index 0466c0c..cfe051c 100644 --- a/Command/Vicfg.hs +++ b/Command/Vicfg.hs @@ -75,119 +75,116 @@ setCfg curcfg newcfg = do diffCfg :: Cfg -> Cfg -> (TrustMap, M.Map UUID (S.Set Group), M.Map UUID String) diffCfg curcfg newcfg = (diff cfgTrustMap, diff cfgGroupMap, diff cfgPreferredContentMap) - where - diff f = M.differenceWith (\x y -> if x == y then Nothing else Just x) - (f newcfg) (f curcfg) + where + diff f = M.differenceWith (\x y -> if x == y then Nothing else Just x) + (f newcfg) (f curcfg) genCfg :: Cfg -> M.Map UUID String -> String genCfg cfg descs = unlines $ concat [intro, trust, groups, preferredcontent] - where - intro = - [ com "git-annex configuration" - , com "" - , com "Changes saved to this file will be recorded in the git-annex branch." - , com "" - , com "Lines in this file have the format:" - , com " setting uuid = value" - ] - - trust = settings cfgTrustMap - [ "" - , com "Repository trust configuration" - , com "(Valid trust levels: " ++ - unwords (map showTrustLevel [Trusted .. DeadTrusted]) ++ - ")" - ] - (\(t, u) -> line "trust" u $ showTrustLevel t) - (\u -> lcom $ line "trust" u $ showTrustLevel SemiTrusted) - - groups = settings cfgGroupMap - [ "" - , com "Repository groups" - , com "(Separate group names with spaces)" - ] - (\(s, u) -> line "group" u $ unwords $ S.toList s) - (\u -> lcom $ line "group" u "") - - preferredcontent = settings cfgPreferredContentMap - [ "" - , com "Repository preferred contents" - ] - (\(s, u) -> line "preferred-content" u s) - (\u -> line "preferred-content" u "") - - settings field desc showvals showdefaults = concat - [ desc - , concatMap showvals $ - sort $ map swap $ M.toList $ field cfg - , concatMap (\u -> lcom $ showdefaults u) $ - missing field - ] - - line setting u value = - [ com $ "(for " ++ (fromMaybe "" $ M.lookup u descs) ++ ")" - , unwords [setting, fromUUID u, "=", value] - ] - lcom = map (\l -> if "#" `isPrefixOf` l then l else "#" ++ l) - missing field = S.toList $ M.keysSet descs `S.difference` M.keysSet (field cfg) + where + intro = + [ com "git-annex configuration" + , com "" + , com "Changes saved to this file will be recorded in the git-annex branch." + , com "" + , com "Lines in this file have the format:" + , com " setting uuid = value" + ] + + trust = settings cfgTrustMap + [ "" + , com "Repository trust configuration" + , com "(Valid trust levels: " ++ + unwords (map showTrustLevel [Trusted .. 
DeadTrusted]) ++ + ")" + ] + (\(t, u) -> line "trust" u $ showTrustLevel t) + (\u -> lcom $ line "trust" u $ showTrustLevel SemiTrusted) + + groups = settings cfgGroupMap + [ "" + , com "Repository groups" + , com "(Separate group names with spaces)" + ] + (\(s, u) -> line "group" u $ unwords $ S.toList s) + (\u -> lcom $ line "group" u "") + + preferredcontent = settings cfgPreferredContentMap + [ "" + , com "Repository preferred contents" + ] + (\(s, u) -> line "preferred-content" u s) + (\u -> line "preferred-content" u "") + + settings field desc showvals showdefaults = concat + [ desc + , concatMap showvals $ sort $ map swap $ M.toList $ field cfg + , concatMap (\u -> lcom $ showdefaults u) $ missing field + ] + + line setting u value = + [ com $ "(for " ++ (fromMaybe "" $ M.lookup u descs) ++ ")" + , unwords [setting, fromUUID u, "=", value] + ] + lcom = map (\l -> if "#" `isPrefixOf` l then l else "#" ++ l) + missing field = S.toList $ M.keysSet descs `S.difference` M.keysSet (field cfg) {- If there's a parse error, returns a new version of the file, - with the problem lines noted. -} parseCfg :: Cfg -> String -> Either String Cfg parseCfg curcfg = go [] curcfg . lines - where - go c cfg [] - | null (catMaybes $ map fst c) = Right cfg - | otherwise = Left $ unlines $ - badheader ++ concatMap showerr (reverse c) - go c cfg (l:ls) = case parse (dropWhile isSpace l) cfg of - Left msg -> go ((Just msg, l):c) cfg ls - Right cfg' -> go ((Nothing, l):c) cfg' ls - - parse l cfg - | null l = Right cfg - | "#" `isPrefixOf` l = Right cfg - | null setting || null u = Left "missing repository uuid" - | otherwise = handle cfg (toUUID u) setting value' - where - (setting, rest) = separate isSpace l - (r, value) = separate (== '=') rest - value' = trimspace value - u = reverse $ trimspace $ - reverse $ trimspace r - trimspace = dropWhile isSpace - - handle cfg u setting value - | setting == "trust" = case readTrustLevel value of - Nothing -> badval "trust value" value - Just t -> - let m = M.insert u t (cfgTrustMap cfg) - in Right $ cfg { cfgTrustMap = m } - | setting == "group" = - let m = M.insert u (S.fromList $ words value) (cfgGroupMap cfg) - in Right $ cfg { cfgGroupMap = m } - | setting == "preferred-content" = - case checkPreferredContentExpression value of - Just e -> Left e - Nothing -> - let m = M.insert u value (cfgPreferredContentMap cfg) - in Right $ cfg { cfgPreferredContentMap = m } - | otherwise = badval "setting" setting - - showerr (Just msg, l) = [parseerr ++ msg, l] - showerr (Nothing, l) - -- filter out the header and parse error lines - -- from any previous parse failure - | any (`isPrefixOf` l) (parseerr:badheader) = [] - | otherwise = [l] - - badval desc val = Left $ "unknown " ++ desc ++ " \"" ++ val ++ "\"" - badheader = - [ com "There was a problem parsing your input." - , com "Search for \"Parse error\" to find the bad lines." - , com "Either fix the bad lines, or delete them (to discard your changes)." 
- ] - parseerr = com "Parse error in next line: " + where + go c cfg [] + | null (catMaybes $ map fst c) = Right cfg + | otherwise = Left $ unlines $ + badheader ++ concatMap showerr (reverse c) + go c cfg (l:ls) = case parse (dropWhile isSpace l) cfg of + Left msg -> go ((Just msg, l):c) cfg ls + Right cfg' -> go ((Nothing, l):c) cfg' ls + + parse l cfg + | null l = Right cfg + | "#" `isPrefixOf` l = Right cfg + | null setting || null u = Left "missing repository uuid" + | otherwise = handle cfg (toUUID u) setting value' + where + (setting, rest) = separate isSpace l + (r, value) = separate (== '=') rest + value' = trimspace value + u = reverse $ trimspace $ reverse $ trimspace r + trimspace = dropWhile isSpace + + handle cfg u setting value + | setting == "trust" = case readTrustLevel value of + Nothing -> badval "trust value" value + Just t -> + let m = M.insert u t (cfgTrustMap cfg) + in Right $ cfg { cfgTrustMap = m } + | setting == "group" = + let m = M.insert u (S.fromList $ words value) (cfgGroupMap cfg) + in Right $ cfg { cfgGroupMap = m } + | setting == "preferred-content" = + case checkPreferredContentExpression value of + Just e -> Left e + Nothing -> + let m = M.insert u value (cfgPreferredContentMap cfg) + in Right $ cfg { cfgPreferredContentMap = m } + | otherwise = badval "setting" setting + + showerr (Just msg, l) = [parseerr ++ msg, l] + showerr (Nothing, l) + -- filter out the header and parse error lines + -- from any previous parse failure + | any (`isPrefixOf` l) (parseerr:badheader) = [] + | otherwise = [l] + + badval desc val = Left $ "unknown " ++ desc ++ " \"" ++ val ++ "\"" + badheader = + [ com "There was a problem parsing your input." + , com "Search for \"Parse error\" to find the bad lines." + , com "Either fix the bad lines, or delete them (to discard your changes)." + ] + parseerr = com "Parse error in next line: " com :: String -> String com s = "# " ++ s diff --git a/Command/WebApp.hs b/Command/WebApp.hs index f87ea98..a0bd2e7 100644 --- a/Command/WebApp.hs +++ b/Command/WebApp.hs @@ -11,10 +11,7 @@ import Common.Annex import Command import Assistant import Assistant.Common -import Assistant.DaemonStatus -import Assistant.ScanRemotes -import Assistant.TransferQueue -import Assistant.TransferSlots +import Assistant.NamedThread import Assistant.Threads.WebApp import Assistant.WebApp import Assistant.Install @@ -46,24 +43,24 @@ start' allowauto = notBareRepo $ do liftIO $ ensureInstalled ifM isInitialized ( go , auto ) stop - where - go = do - browser <- fromRepo webBrowser - f <- liftIO . absPath =<< fromRepo gitAnnexHtmlShim - ifM (checkpid <&&> checkshim f) - ( liftIO $ openBrowser browser f - , startDaemon True True $ Just $ - const $ openBrowser browser - ) - auto - | allowauto = liftIO startNoRepo - | otherwise = do - d <- liftIO getCurrentDirectory - error $ "no git repository in " ++ d - checkpid = do - pidfile <- fromRepo gitAnnexPidFile - liftIO $ isJust <$> checkDaemon pidfile - checkshim f = liftIO $ doesFileExist f + where + go = do + browser <- fromRepo webBrowser + f <- liftIO . 
absPath =<< fromRepo gitAnnexHtmlShim + ifM (checkpid <&&> checkshim f) + ( liftIO $ openBrowser browser f + , startDaemon True True $ Just $ + const $ openBrowser browser + ) + auto + | allowauto = liftIO startNoRepo + | otherwise = do + d <- liftIO getCurrentDirectory + error $ "no git repository in " ++ d + checkpid = do + pidfile <- fromRepo gitAnnexPidFile + liftIO $ isJust <$> checkDaemon pidfile + checkshim f = liftIO $ doesFileExist f {- When run without a repo, see if there is an autoStartFile, - and if so, start the first available listed repository. @@ -99,46 +96,50 @@ autoStart autostartfile = do -} firstRun :: IO () firstRun = do + {- Without a repository, we cannot have an Annex monad, so cannot + - get a ThreadState. Using undefined is only safe because the + - webapp checks its noAnnex field before accessing the + - threadstate. -} + let st = undefined + {- Get a DaemonStatus without running in the Annex monad. -} dstatus <- atomically . newTMVar =<< newDaemonStatus - scanremotes <- newScanRemoteMap - transferqueue <- newTransferQueue - transferslots <- newTransferSlots + d <- newAssistantData st dstatus urlrenderer <- newUrlRenderer v <- newEmptyMVar let callback a = Just $ a v - void $ runNamedThread dstatus $ - webAppThread Nothing dstatus scanremotes - transferqueue transferslots urlrenderer - (callback signaler) (callback mainthread) - where - signaler v = do - putMVar v "" - takeMVar v - mainthread v _url htmlshim = do - browser <- maybe Nothing webBrowser <$> Git.Config.global - openBrowser browser htmlshim - - _wait <- takeMVar v + void $ runAssistant d $ runNamedThread $ + webAppThread d urlrenderer True + (callback signaler) + (callback mainthread) + where + signaler v = do + putMVar v "" + takeMVar v + mainthread v _url htmlshim = do + browser <- maybe Nothing webBrowser <$> Git.Config.global + openBrowser browser htmlshim - state <- Annex.new =<< Git.CurrentRepo.get - Annex.eval state $ do - dummydaemonize - startAssistant True id $ Just $ sendurlback v - sendurlback v url _htmlshim = putMVar v url - {- Set up the pid file in the new repo. -} - dummydaemonize = - liftIO . lockPidFile =<< fromRepo gitAnnexPidFile + _wait <- takeMVar v + + state <- Annex.new =<< Git.CurrentRepo.get + Annex.eval state $ do + dummydaemonize + startAssistant True id $ Just $ sendurlback v + sendurlback v url _htmlshim = putMVar v url + + {- Set up the pid file in the new repo. -} + dummydaemonize = liftIO . 
lockPidFile =<< fromRepo gitAnnexPidFile openBrowser :: Maybe FilePath -> FilePath -> IO () openBrowser cmd htmlshim = go $ maybe runBrowser runCustomBrowser cmd - where - url = fileUrl htmlshim - go a = do - putStrLn "" - putStrLn $ "Launching web browser on " ++ url - unlessM (a url) $ - error $ "failed to start web browser" - runCustomBrowser c u = boolSystem c [Param u] + where + url = fileUrl htmlshim + go a = do + putStrLn "" + putStrLn $ "Launching web browser on " ++ url + unlessM (a url) $ + error $ "failed to start web browser" + runCustomBrowser c u = boolSystem c [Param u] {- web.browser is a generic git config setting for a web browser program -} webBrowser :: Git.Repo -> Maybe FilePath diff --git a/Command/Whereis.hs b/Command/Whereis.hs index c77b3a0..251c4ec 100644 --- a/Command/Whereis.hs +++ b/Command/Whereis.hs @@ -40,15 +40,15 @@ perform remotemap key = do forM_ (mapMaybe (`M.lookup` remotemap) locations) $ performRemote key if null safelocations then stop else next $ return True - where - copiesplural 1 = "copy" - copiesplural _ = "copies" - untrustedheader = "The following untrusted locations may also have copies:\n" + where + copiesplural 1 = "copy" + copiesplural _ = "copies" + untrustedheader = "The following untrusted locations may also have copies:\n" performRemote :: Key -> Remote -> Annex () performRemote key remote = maybe noop go $ whereisKey remote - where - go a = do - ls <- a key - unless (null ls) $ showLongNote $ unlines $ - map (\l -> name remote ++ ": " ++ l) ls + where + go a = do + ls <- a key + unless (null ls) $ showLongNote $ unlines $ + map (\l -> name remote ++ ": " ++ l) ls diff --git a/Command/XMPPGit.hs b/Command/XMPPGit.hs new file mode 100644 index 0000000..c54d6a8 --- /dev/null +++ b/Command/XMPPGit.hs @@ -0,0 +1,42 @@ +{- git-annex command + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Command.XMPPGit where + +import Common.Annex +import Command +import Assistant.XMPP.Git + +def :: [Command] +def = [noCommit $ noRepo xmppGitRelay $ dontCheck repoExists $ + command "xmppgit" paramNothing seek "git to XMPP relay (internal use)"] + +seek :: [CommandSeek] +seek = [withWords start] + +start :: [String] -> CommandStart +start _ = do + liftIO gitRemoteHelper + liftIO xmppGitRelay + stop + +{- A basic implementation of the git-remote-helpers protocol. -} +gitRemoteHelper :: IO () +gitRemoteHelper = do + expect "capabilities" + respond ["connect"] + expect "connect git-receive-pack" + respond [] + where + expect s = do + cmd <- getLine + unless (cmd == s) $ + error $ "git-remote-helpers protocol error: expected: " ++ s ++ ", but got: " ++ cmd + respond l = do + mapM_ putStrLn l + putStrLn "" + hFlush stdout diff --git a/Common.hs b/Common.hs index 04ec1e0..5b53f37 100644 --- a/Common.hs +++ b/Common.hs @@ -1,9 +1,11 @@ +{-# LANGUAGE PackageImports #-} + module Common (module X) where import Control.Monad as X hiding (join) import Control.Monad.IfElse as X import Control.Applicative as X -import Control.Monad.State.Strict as X (liftIO) +import "mtl" Control.Monad.State.Strict as X (liftIO) import Control.Exception.Extensible as X (IOException) import Data.Maybe as X diff --git a/Config.hs b/Config.hs index 04ab665..1077730 100644 --- a/Config.hs +++ b/Config.hs @@ -95,24 +95,24 @@ repoSyncable r = fromMaybe True . Git.Config.isTrue - in git config. forcenumcopies overrides everything. 
-} getNumCopies :: Maybe Int -> Annex Int getNumCopies v = perhaps (use v) =<< Annex.getState Annex.forcenumcopies - where - use (Just n) = return n - use Nothing = perhaps (return 1) =<< - readish <$> getConfig (annexConfig "numcopies") "1" - perhaps fallback = maybe fallback (return . id) + where + use (Just n) = return n + use Nothing = perhaps (return 1) =<< + readish <$> getConfig (annexConfig "numcopies") "1" + perhaps fallback = maybe fallback (return . id) {- Gets the trust level set for a remote in git config. -} getTrustLevel :: Git.Repo -> Annex (Maybe String) getTrustLevel r = fromRepo $ Git.Config.getMaybe key - where - (ConfigKey key) = remoteConfig r "trustlevel" + where + (ConfigKey key) = remoteConfig r "trustlevel" {- Gets annex.diskreserve setting. -} getDiskReserve :: Annex Integer getDiskReserve = fromMaybe megabyte . readSize dataUnits <$> getConfig (annexConfig "diskreserve") "" - where - megabyte = 1000000 + where + megabyte = 1000000 {- Gets annex.httpheaders or annex.httpheaders-command setting, - splitting it into lines. -} diff --git a/Crypto.hs b/Crypto.hs index 3387be1..071fb7a 100644 --- a/Crypto.hs +++ b/Crypto.hs @@ -75,16 +75,16 @@ updateEncryptedCipher keyid encipher@(EncryptedCipher _ ks) = do ks' <- Gpg.findPubKeys keyid cipher <- decryptCipher encipher encryptCipher cipher (merge ks ks') - where - merge (KeyIds a) (KeyIds b) = KeyIds $ a ++ b + where + merge (KeyIds a) (KeyIds b) = KeyIds $ a ++ b describeCipher :: StorableCipher -> String describeCipher (SharedCipher _) = "shared cipher" describeCipher (EncryptedCipher _ (KeyIds ks)) = "with gpg " ++ keys ks ++ " " ++ unwords ks - where - keys [_] = "key" - keys _ = "keys" + where + keys [_] = "key" + keys _ = "keys" {- Encrypts a Cipher to the specified KeyIds. -} encryptCipher :: Cipher -> KeyIds -> IO StorableCipher @@ -92,20 +92,20 @@ encryptCipher (Cipher c) (KeyIds ks) = do let ks' = nub $ sort ks -- gpg complains about duplicate recipient keyids encipher <- Gpg.pipeStrict (encrypt++recipients ks') c return $ EncryptedCipher encipher (KeyIds ks') - where - encrypt = [ Params "--encrypt" ] - recipients l = force_recipients : - concatMap (\k -> [Param "--recipient", Param k]) l - -- Force gpg to only encrypt to the specified - -- recipients, not configured defaults. - force_recipients = Params "--no-encrypt-to --no-default-recipient" + where + encrypt = [ Params "--encrypt" ] + recipients l = force_recipients : + concatMap (\k -> [Param "--recipient", Param k]) l + -- Force gpg to only encrypt to the specified + -- recipients, not configured defaults. + force_recipients = Params "--no-encrypt-to --no-default-recipient" {- Decrypting an EncryptedCipher is expensive; the Cipher should be cached. -} decryptCipher :: StorableCipher -> IO Cipher decryptCipher (SharedCipher t) = return $ Cipher t decryptCipher (EncryptedCipher t _) = Cipher <$> Gpg.pipeStrict decrypt t - where - decrypt = [ Param "--decrypt" ] + where + decrypt = [ Param "--decrypt" ] {- Generates an encrypted form of a Key. 
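{- Editor's note, not part of the patch: a sketch of how encryptCipher's
 - recipient handling above expands into gpg parameters, flattened from
 - Params/Param to plain strings. The key ids are sorted and deduplicated,
 - each becomes a "--recipient <id>" pair, and the leading options keep gpg
 - from also encrypting to its configured default recipients. -}
import Data.List (nub, sort)

gpgRecipientParams :: [String] -> [String]
gpgRecipientParams ks =
    [ "--no-encrypt-to", "--no-default-recipient" ]
    ++ concatMap (\k -> [ "--recipient", k ]) (nub (sort ks))

-- e.g. gpgRecipientParams ["B", "A", "B"] ==
--      ["--no-encrypt-to","--no-default-recipient","--recipient","A","--recipient","B"]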
The encryption does not need to be - reversable, nor does it need to be the same type of encryption used @@ -136,8 +136,12 @@ withEncryptedContent = pass withEncryptedHandle withDecryptedContent :: Cipher -> IO L.ByteString -> (L.ByteString -> IO a) -> IO a withDecryptedContent = pass withDecryptedHandle -pass :: (Cipher -> IO L.ByteString -> (Handle -> IO a) -> IO a) - -> Cipher -> IO L.ByteString -> (L.ByteString -> IO a) -> IO a +pass + :: (Cipher -> IO L.ByteString -> (Handle -> IO a) -> IO a) + -> Cipher + -> IO L.ByteString + -> (L.ByteString -> IO a) + -> IO a pass to n s a = to n s $ a <=< L.hGetContents hmacWithCipher :: Cipher -> String -> String @@ -148,5 +152,5 @@ hmacWithCipher' c s = showDigest $ hmacSha1 (fromString c) (fromString s) {- Ensure that hmacWithCipher' returns the same thing forevermore. -} prop_hmacWithCipher_sane :: Bool prop_hmacWithCipher_sane = known_good == hmacWithCipher' "foo" "bar" - where - known_good = "46b4ec586117154dacd49d664e5d63fdc88efb51" + where + known_good = "46b4ec586117154dacd49d664e5d63fdc88efb51" diff --git a/Git.hs b/Git.hs index eab33f1..46f995e 100644 --- a/Git.hs +++ b/Git.hs @@ -81,8 +81,8 @@ repoIsSsh Repo { location = Url url } | scheme == "git+ssh:" = True | scheme == "ssh+git:" = True | otherwise = False - where - scheme = uriScheme url + where + scheme = uriScheme url repoIsSsh _ = False repoIsHttp :: Repo -> Bool @@ -126,5 +126,5 @@ hookPath script repo = do let hook = localGitDir repo "hooks" script ifM (catchBoolIO $ isexecutable hook) ( return $ Just hook , return Nothing ) - where - isexecutable f = isExecutable . fileMode <$> getFileStatus f + where + isexecutable f = isExecutable . fileMode <$> getFileStatus f diff --git a/Git/Command.hs b/Git/Command.hs index 5f2dd47..37df447 100644 --- a/Git/Command.hs +++ b/Git/Command.hs @@ -7,7 +7,6 @@ module Git.Command where -import System.Posix.Process (getAnyProcessStatus) import System.Process (std_out, env) import Common @@ -97,17 +96,6 @@ pipeNullSplitZombie params repo = leaveZombie <$> pipeNullSplit params repo leaveZombie :: (a, IO Bool) -> a leaveZombie = fst -{- Reaps any zombie git processes. - - - - Warning: Not thread safe. Anything that was expecting to wait - - on a process and get back an exit status is going to be confused - - if this reap gets there first. -} -reap :: IO () -reap = do - -- throws an exception when there are no child processes - catchDefaultIO Nothing (getAnyProcessStatus False True) - >>= maybe noop (const reap) - {- Runs a git command as a coprocess. -} gitCoProcessStart :: [CommandParam] -> Repo -> IO CoProcess.CoProcessHandle gitCoProcessStart params repo = CoProcess.start "git" (toCommand $ gitCommandLine params repo) (gitEnv repo) diff --git a/Git/Config.hs b/Git/Config.hs index cc9b27b..0d6d67f 100644 --- a/Git/Config.hs +++ b/Git/Config.hs @@ -15,6 +15,7 @@ import Common import Git import Git.Types import qualified Git.Construct +import Utility.UserInfo {- Returns a single git config setting, or a default value if not set. -} get :: String -> String -> Repo -> String diff --git a/Git/Construct.hs b/Git/Construct.hs index 3d39b08..e367c09 100644 --- a/Git/Construct.hs +++ b/Git/Construct.hs @@ -27,6 +27,7 @@ import Common import Git.Types import Git import qualified Git.Url as Url +import Utility.UserInfo {- Finds the git repository used for the cwd, which may be in a parent - directory. -} @@ -158,7 +159,10 @@ fromRemoteLocation s repo = gen $ calcloc s (prefix, suffix) = ("url." 
, ".insteadof") urlstyle v = isURI v || ":" `isInfixOf` v && "//" `isInfixOf` v -- git remotes can be written scp style -- [user@]host:dir - scpstyle v = ":" `isInfixOf` v && not ("//" `isInfixOf` v) + -- but foo::bar is a git-remote-helper location instead + scpstyle v = ":" `isInfixOf` v + && not ("//" `isInfixOf` v) + && not ("::" `isInfixOf` v) scptourl v = "ssh://" ++ host ++ slash dir where (host, dir) = separate (== ':') v diff --git a/Git/CurrentRepo.hs b/Git/CurrentRepo.hs index 908cc38..29bb281 100644 --- a/Git/CurrentRepo.hs +++ b/Git/CurrentRepo.hs @@ -30,7 +30,7 @@ import qualified Git.Config get :: IO Repo get = do gd <- pathenv "GIT_DIR" - r <- configure gd =<< maybe fromCwd fromPath gd + r <- configure gd =<< fromCwd wt <- maybe (worktree $ location r) Just <$> pathenv "GIT_WORK_TREE" case wt of Nothing -> return r diff --git a/Git/LsFiles.hs b/Git/LsFiles.hs index 5dd988f..4f8ac3f 100644 --- a/Git/LsFiles.hs +++ b/Git/LsFiles.hs @@ -120,17 +120,19 @@ data InternalUnmerged = InternalUnmerged parseUnmerged :: String -> Maybe InternalUnmerged parseUnmerged s - | null file || length ws < 3 = Nothing - | otherwise = do - stage <- readish (ws !! 2) :: Maybe Int - unless (stage == 2 || stage == 3) $ - fail undefined -- skip stage 1 - blobtype <- readBlobType (ws !! 0) - sha <- extractSha (ws !! 1) - return $ InternalUnmerged (stage == 2) file (Just blobtype) (Just sha) + | null file = Nothing + | otherwise = case words metadata of + (rawblobtype:rawsha:rawstage:_) -> do + stage <- readish rawstage :: Maybe Int + unless (stage == 2 || stage == 3) $ + fail undefined -- skip stage 1 + blobtype <- readBlobType rawblobtype + sha <- extractSha rawsha + return $ InternalUnmerged (stage == 2) file + (Just blobtype) (Just sha) + _ -> Nothing where (metadata, file) = separate (== '\t') s - ws = words metadata reduceUnmerged :: [Unmerged] -> [InternalUnmerged] -> [Unmerged] reduceUnmerged c [] = c diff --git a/Git/LsTree.hs b/Git/LsTree.hs index dc03b88..64187b8 100644 --- a/Git/LsTree.hs +++ b/Git/LsTree.hs @@ -8,6 +8,7 @@ module Git.LsTree ( TreeItem(..), lsTree, + lsTreeFiles, parseLsTree ) where @@ -27,11 +28,16 @@ data TreeItem = TreeItem , file :: FilePath } deriving Show -{- Lists the contents of a Ref -} +{- Lists the complete contents of a tree. -} lsTree :: Ref -> Repo -> IO [TreeItem] lsTree t repo = map parseLsTree <$> pipeNullSplitZombie [Params "ls-tree --full-tree -z -r --", File $ show t] repo +{- Lists specified files in a tree. -} +lsTreeFiles :: Ref -> [FilePath] -> Repo -> IO [TreeItem] +lsTreeFiles t fs repo = map parseLsTree <$> + pipeNullSplitZombie ([Params "ls-tree -z --", File $ show t] ++ map File fs) repo + {- Parses a line of ls-tree output. - (The --long format is not currently supported.) -} parseLsTree :: String -> TreeItem diff --git a/Git/Remote.hs b/Git/Remote.hs new file mode 100644 index 0000000..5640e9f --- /dev/null +++ b/Git/Remote.hs @@ -0,0 +1,33 @@ +{- git remote stuff + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Git.Remote where + +import Common +import Data.Char + +{- Construct a legal git remote name out of an arbitrary input string. + - + - There seems to be no formal definition of this in the git source, + - just some ad-hoc checks, and some other things that fail with certian + - types of names (like ones starting with '-'). 
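{- Editor's note, not part of the patch: worked examples of what the
 - makeLegalName function added just below produces, derived from its
 - definition. Slashes become underscores, other punctuation is dropped,
 - leading '.', '/' and '-' are stripped, and an empty result falls back to
 - "unnamed". Assumes the demo sits next to the definition in Git/Remote.hs. -}
makeLegalNameDemo :: Bool
makeLegalNameDemo = and
    [ makeLegalName "foo/bar" == "foo_bar"
    , makeLegalName "--force" == "force"
    , makeLegalName ".hidden" == "hidden"
    , makeLegalName "!!!" == "unnamed"
    ]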
+ -} +makeLegalName :: String -> String +makeLegalName s = case filter legal $ replace "/" "_" s of + -- it can't be empty + [] -> "unnamed" + -- it can't start with / or - or . + '.':s' -> makeLegalName s' + '/':s' -> makeLegalName s' + '-':s' -> makeLegalName s' + s' -> s' + where + {- Only alphanumerics, and a few common bits of punctuation common + - in hostnames. -} + legal '_' = True + legal '.' = True + legal c = isAlphaNum c diff --git a/GitAnnex.hs b/GitAnnex.hs index c35846d..81667ee 100644 --- a/GitAnnex.hs +++ b/GitAnnex.hs @@ -64,14 +64,17 @@ import qualified Command.Import import qualified Command.Map import qualified Command.Upgrade import qualified Command.Version +import qualified Command.Help #ifdef WITH_ASSISTANT import qualified Command.Watch import qualified Command.Assistant #ifdef WITH_WEBAPP import qualified Command.WebApp #endif +#ifdef WITH_XMPP +import qualified Command.XMPPGit +#endif #endif -import qualified Command.Help cmds :: [Command] cmds = concat @@ -117,14 +120,17 @@ cmds = concat , Command.Map.def , Command.Upgrade.def , Command.Version.def + , Command.Help.def #ifdef WITH_ASSISTANT , Command.Watch.def , Command.Assistant.def #ifdef WITH_WEBAPP , Command.WebApp.def #endif +#ifdef WITH_XMPP + , Command.XMPPGit.def +#endif #endif - , Command.Help.def ] options :: [Option] @@ -158,12 +164,13 @@ options = Option.common ++ , Option ['T'] ["time-limit"] (ReqArg Limit.addTimeLimit paramTime) "stop after the specified amount of time" ] ++ Option.matcher - where - setnumcopies v = Annex.changeState $ \s -> s { Annex.forcenumcopies = readish v } - setgitconfig :: String -> Annex () - setgitconfig v = do - newg <- inRepo $ Git.Config.store v - Annex.changeState $ \s -> s { Annex.repo = newg } + where + setnumcopies v = Annex.changeState $ + \s -> s { Annex.forcenumcopies = readish v } + setgitconfig :: String -> Annex () + setgitconfig v = do + newg <- inRepo $ Git.Config.store v + Annex.changeState $ \s -> s { Annex.repo = newg } header :: String header = "Usage: git-annex command [option ..]" diff --git a/GitAnnexShell.hs b/GitAnnexShell.hs index dc15a6c..f77347a 100644 --- a/GitAnnexShell.hs +++ b/GitAnnexShell.hs @@ -1,13 +1,13 @@ {- git-annex-shell main program - - - Copyright 2010 Joey Hess + - Copyright 2010-2012 Joey Hess - - Licensed under the GNU GPL version 3 or higher. 
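{- Editor's sketch, not part of the patch: the directory restriction that
 - git-annex-shell gains below (checkDirectory/canondir) compares the requested
 - directory against GIT_ANNEX_SHELL_DIRECTORY after normalizing both against
 - the home directory, so "~/annex", "/~/annex", "annex" and the absolute path
 - all name the same repository. The real code uses equalFilePath and relHome;
 - this standalone approximation uses only the Prelude and Data.List. -}
import Data.List (isPrefixOf)

sameRepoDir :: FilePath -> FilePath -> FilePath -> Bool
sameRepoDir home allowed requested = canon allowed == canon requested
  where
    canon d
        | "~/" `isPrefixOf` d = home ++ drop 1 d
        | "/~/" `isPrefixOf` d = home ++ drop 2 d
        | "/" `isPrefixOf` d = d
        | otherwise = home ++ "/" ++ d

-- e.g. sameRepoDir "/home/joey" "~/annex" "/home/joey/annex" == True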
-} module GitAnnexShell where -import System.Environment +import System.Posix.Env import System.Console.GetOpt import Common.Annex @@ -17,6 +17,7 @@ import Command import Annex.UUID import qualified Option import Fields +import Utility.UserInfo import qualified Command.ConfigList import qualified Command.InAnnex @@ -43,24 +44,22 @@ cmds_notreadonly = concat cmds :: [Command] cmds = map adddirparam $ cmds_readonly ++ cmds_notreadonly - where - adddirparam c = c - { cmdparamdesc = "DIRECTORY " ++ cmdparamdesc c - } + where + adddirparam c = c { cmdparamdesc = "DIRECTORY " ++ cmdparamdesc c } options :: [OptDescr (Annex ())] options = Option.common ++ [ Option [] ["uuid"] (ReqArg checkuuid paramUUID) "local repository uuid" ] - where - checkuuid expected = getUUID >>= check - where - check u | u == toUUID expected = noop - check NoUUID = unexpected "uninitialized repository" - check u = unexpected $ "UUID " ++ fromUUID u - unexpected s = error $ - "expected repository UUID " ++ - expected ++ " but found " ++ s + where + checkuuid expected = getUUID >>= check + where + check u | u == toUUID expected = noop + check NoUUID = unexpected "uninitialized repository" + check u = unexpected $ "UUID " ++ fromUUID u + unexpected s = error $ + "expected repository UUID " ++ + expected ++ " but found " ++ s header :: String header = "Usage: git-annex-shell [-c] command [parameters ...] [option ..]" @@ -86,6 +85,7 @@ builtins = map cmdname cmds builtin :: String -> String -> [String] -> IO () builtin cmd dir params = do checkNotReadOnly cmd + checkDirectory $ Just dir let (params', fieldparams) = partitionParams params let fields = filter checkField $ parseFields fieldparams dispatch False (cmd : params') cmds options fields header $ @@ -93,6 +93,10 @@ builtin cmd dir params = do external :: [String] -> IO () external params = do + {- Normal git-shell commands all have the directory as their last + - parameter. -} + let lastparam = lastMaybe =<< shellUnEscape <$> lastMaybe params + checkDirectory lastparam checkNotLimited unlessM (boolSystem "git-shell" $ map Param $ "-c":fst (partitionParams params)) $ error "git-shell failed" @@ -131,7 +135,40 @@ checkNotReadOnly cmd | cmd `elem` map cmdname cmds_readonly = noop | otherwise = checkEnv "GIT_ANNEX_SHELL_READONLY" +checkDirectory :: Maybe FilePath -> IO () +checkDirectory mdir = do + v <- getEnv "GIT_ANNEX_SHELL_DIRECTORY" + case (v, mdir) of + (Nothing, _) -> noop + (Just d, Nothing) -> req d Nothing + (Just d, Just dir) + | d `equalFilePath` dir -> noop + | otherwise -> do + home <- myHomeDir + d' <- canondir home d + dir' <- canondir home dir + if d' `equalFilePath` dir' + then noop + else req d' (Just dir') + where + req d mdir' = error $ unwords + [ "Only allowed to access" + , d + , maybe "and could not determine directory from command line" ("not " ++) mdir' + ] + + {- A directory may start with ~/ or in some cases, even /~/, + - or could just be relative to home, or of course could + - be absolute. -} + canondir home d + | "~/" `isPrefixOf` d = return d + | "/~/" `isPrefixOf` d = return $ drop 1 d + | otherwise = relHome $ absPathFrom home d + checkEnv :: String -> IO () -checkEnv var = - whenM (not . 
null <$> catchDefaultIO "" (getEnv var)) $ - error $ "Action blocked by " ++ var +checkEnv var = do + v <- getEnv var + case v of + Nothing -> noop + Just "" -> noop + Just _ -> error $ "Action blocked by " ++ var diff --git a/INSTALL b/INSTALL index 7e88fc0..40a526c 100644 --- a/INSTALL +++ b/INSTALL @@ -11,7 +11,6 @@ detailed instructions | quick install [[ArchLinux]] | `yaourt -Sy git-annex` [[NixOS]] | `nix-env -i git-annex` [[Gentoo]] | `emerge git-annex` -[[NixOS]] | `nix install git-annex` [[ScientificLinux5]] | (and other RHEL5 clones like CentOS5) [[openSUSE]] | Windows | [[sorry, Windows not supported yet|todo/windows_support]] diff --git a/Init.hs b/Init.hs index aae1016..effa61e 100644 --- a/Init.hs +++ b/Init.hs @@ -20,20 +20,16 @@ import qualified Annex.Branch import Logs.UUID import Annex.Version import Annex.UUID - -import System.Posix.User +import Utility.UserInfo genDescription :: Maybe String -> Annex String genDescription (Just d) = return d genDescription Nothing = do hostname <- maybe "" id <$> liftIO getHostname let at = if null hostname then "" else "@" - username <- clicketyclickety + username <- liftIO myUserName reldir <- liftIO . relHome =<< fromRepo Git.repoPath return $ concat [username, at, hostname, ":", reldir] - where - clicketyclickety = liftIO $ userName <$> - (getUserEntryForID =<< getEffectiveUserID) initialize :: Maybe String -> Annex () initialize mdescription = do @@ -56,11 +52,11 @@ uninitialize = do repos that did not intend to use it. -} ensureInitialized :: Annex () ensureInitialized = getVersion >>= maybe needsinit checkVersion - where - needsinit = ifM Annex.Branch.hasSibling - ( initialize Nothing - , error "First run: git-annex init" - ) + where + needsinit = ifM Annex.Branch.hasSibling + ( initialize Nothing + , error "First run: git-annex init" + ) {- Checks if a repository is initialized. Does not check version for ugrade. -} isInitialized :: Annex Bool diff --git a/Limit.hs b/Limit.hs index f39e2d6..e9c9901 100644 --- a/Limit.hs +++ b/Limit.hs @@ -28,7 +28,7 @@ import Logs.Group import Utility.HumanTime import Utility.DataUnits -type MatchFiles = AssumeNotPresent -> FilePath -> Annex Bool +type MatchFiles = AssumeNotPresent -> Annex.FileInfo -> Annex Bool type MkLimit = String -> Either String MatchFiles type AssumeNotPresent = S.Set UUID @@ -38,10 +38,10 @@ limited = (not . Utility.Matcher.matchesAny) <$> getMatcher' {- Gets a matcher for the user-specified limits. The matcher is cached for - speed; once it's obtained the user-specified limits can't change. -} -getMatcher :: Annex (FilePath -> Annex Bool) +getMatcher :: Annex (Annex.FileInfo -> Annex Bool) getMatcher = Utility.Matcher.matchM <$> getMatcher' -getMatcher' :: Annex (Utility.Matcher.Matcher (FilePath -> Annex Bool)) +getMatcher' :: Annex (Utility.Matcher.Matcher (Annex.FileInfo -> Annex Bool)) getMatcher' = do m <- Annex.getState Annex.limit case m of @@ -52,11 +52,11 @@ getMatcher' = do return matcher {- Adds something to the limit list, which is built up reversed. -} -add :: Utility.Matcher.Token (FilePath -> Annex Bool) -> Annex () +add :: Utility.Matcher.Token (Annex.FileInfo -> Annex Bool) -> Annex () add l = Annex.changeState $ \s -> s { Annex.limit = prepend $ Annex.limit s } - where - prepend (Left ls) = Left $ l:ls - prepend _ = error "internal" + where + prepend (Left ls) = Left $ l:ls + prepend _ = error "internal" {- Adds a new token. -} addToken :: String -> Annex () @@ -80,11 +80,12 @@ addExclude = addLimit . 
limitExclude limitExclude :: MkLimit limitExclude glob = Right $ const $ return . not . matchglob glob -matchglob :: String -> FilePath -> Bool -matchglob glob f = isJust $ match cregex f [] - where - cregex = compile regex [] - regex = '^':wildToRegex glob +matchglob :: String -> Annex.FileInfo -> Bool +matchglob glob (Annex.FileInfo { Annex.matchFile = f }) = + isJust $ match cregex f [] + where + cregex = compile regex [] + regex = '^':wildToRegex glob {- Adds a limit to skip files not believed to be present - in a specfied repository. -} @@ -96,21 +97,35 @@ limitIn name = Right $ \notpresent -> check $ if name == "." then inhere notpresent else inremote notpresent - where - check a = Backend.lookupFile >=> handle a - handle _ Nothing = return False - handle a (Just (key, _)) = a key - inremote notpresent key = do - u <- Remote.nameToUUID name + where + check a = lookupFile >=> handle a + handle _ Nothing = return False + handle a (Just (key, _)) = a key + inremote notpresent key = do + u <- Remote.nameToUUID name + us <- Remote.keyLocations key + return $ u `elem` us && u `S.notMember` notpresent + inhere notpresent key + | S.null notpresent = inAnnex key + | otherwise = do + u <- getUUID + if u `S.member` notpresent + then return False + else inAnnex key + +{- Limit to content that is currently present on a uuid. -} +limitPresent :: Maybe UUID -> MkLimit +limitPresent u _ = Right $ const $ check $ \key -> do + hereu <- getUUID + if u == Just hereu || u == Nothing + then inAnnex key + else do us <- Remote.keyLocations key - return $ u `elem` us && u `S.notMember` notpresent - inhere notpresent key - | S.null notpresent = inAnnex key - | otherwise = do - u <- getUUID - if u `S.member` notpresent - then return False - else inAnnex key + return $ maybe False (`elem` us) u + where + check a = lookupFile >=> handle a + handle _ Nothing = return False + handle a (Just (key, _)) = a key {- Adds a limit to skip files not believed to have the specified number - of copies. -} @@ -124,18 +139,18 @@ limitCopies want = case split ":" want of Nothing -> go n $ checkgroup v [n] -> go n $ const $ return True _ -> Left "bad value for copies" - where - go num good = case readish num of - Nothing -> Left "bad number for copies" - Just n -> Right $ \notpresent -> - Backend.lookupFile >=> handle n good notpresent - handle _ _ _ Nothing = return False - handle n good notpresent (Just (key, _)) = do - us <- filter (`S.notMember` notpresent) - <$> (filterM good =<< Remote.keyLocations key) - return $ length us >= n - checktrust t u = (== t) <$> lookupTrust u - checkgroup g u = S.member g <$> lookupGroups u + where + go num good = case readish num of + Nothing -> Left "bad number for copies" + Just n -> Right $ \notpresent f -> + lookupFile f >>= handle n good notpresent + handle _ _ _ Nothing = return False + handle n good notpresent (Just (key, _)) = do + us <- filter (`S.notMember` notpresent) + <$> (filterM good =<< Remote.keyLocations key) + return $ length us >= n + checktrust t u = (== t) <$> lookupTrust u + checkgroup g u = S.member g <$> lookupGroups u {- Adds a limit to skip files not believed to be present in all - repositories in the specified group. 
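{- Editor's note, not part of the patch: a standalone sketch of how the
 - copies= value handled above is split. A bare number means that many copies
 - anywhere; "trustlevel:N" or "groupname:N" restricts which repositories are
 - counted. Text.Read.readMaybe stands in for readish. -}
import Text.Read (readMaybe)

data CopiesSpec = AnyCopies Int | QualifiedCopies String Int
    deriving Show

parseCopiesSpec :: String -> Maybe CopiesSpec
parseCopiesSpec s = case break (== ':') s of
    (n, "") -> AnyCopies <$> readMaybe n
    (q, ':':n) -> QualifiedCopies q <$> readMaybe n
    _ -> Nothing

-- e.g. parseCopiesSpec "2"         == Just (AnyCopies 2)
--      parseCopiesSpec "trusted:2" == Just (QualifiedCopies "trusted" 2)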
-} @@ -147,27 +162,26 @@ addInAllGroup groupname = do limitInAllGroup :: GroupMap -> MkLimit limitInAllGroup m groupname | S.null want = Right $ const $ const $ return True - | otherwise = Right $ \notpresent -> - Backend.lookupFile >=> check notpresent - where - want = fromMaybe S.empty $ M.lookup groupname $ uuidsByGroup m - check _ Nothing = return False - check notpresent (Just (key, _)) - -- optimisation: Check if a wanted uuid is notpresent. - | not (S.null (S.intersection want notpresent)) = return False - | otherwise = do - present <- S.fromList <$> Remote.keyLocations key - return $ S.null $ want `S.difference` present + | otherwise = Right $ \notpresent -> lookupFile >=> check notpresent + where + want = fromMaybe S.empty $ M.lookup groupname $ uuidsByGroup m + check _ Nothing = return False + check notpresent (Just (key, _)) + -- optimisation: Check if a wanted uuid is notpresent. + | not (S.null (S.intersection want notpresent)) = return False + | otherwise = do + present <- S.fromList <$> Remote.keyLocations key + return $ S.null $ want `S.difference` present {- Adds a limit to skip files not using a specified key-value backend. -} addInBackend :: String -> Annex () addInBackend = addLimit . limitInBackend limitInBackend :: MkLimit -limitInBackend name = Right $ const $ Backend.lookupFile >=> check - where - wanted = Backend.lookupBackendName name - check = return . maybe False ((==) wanted . snd) +limitInBackend name = Right $ const $ lookupFile >=> check + where + wanted = Backend.lookupBackendName name + check = return . maybe False ((==) wanted . snd) {- Adds a limit to skip files that are too large or too small -} addLargerThan :: String -> Annex () @@ -179,10 +193,10 @@ addSmallerThan = addLimit . limitSize (<) limitSize :: (Maybe Integer -> Maybe Integer -> Bool) -> MkLimit limitSize vs s = case readSize dataUnits s of Nothing -> Left "bad size" - Just sz -> Right $ const $ Backend.lookupFile >=> check sz - where - check _ Nothing = return False - check sz (Just (key, _)) = return $ keySize key `vs` Just sz + Just sz -> Right $ const $ lookupFile >=> check sz + where + check _ Nothing = return False + check sz (Just (key, _)) = return $ keySize key `vs` Just sz addTimeLimit :: String -> Annex () addTimeLimit s = do @@ -196,3 +210,6 @@ addTimeLimit s = do warning $ "Time limit (" ++ s ++ ") reached!" liftIO $ exitWith $ ExitFailure 101 else return True + +lookupFile :: Annex.FileInfo -> Annex (Maybe (Key, Backend)) +lookupFile = Backend.lookupFile . Annex.relFile diff --git a/Locations.hs b/Locations.hs index 4bb2a22..3a7c89e 100644 --- a/Locations.hs +++ b/Locations.hs @@ -100,10 +100,10 @@ gitAnnexLocation key r - don't need to do any work to check if the file is - present. -} return $ inrepo $ annexLocation key hashDirMixed - where - inrepo d = Git.localGitDir r d - check locs@(l:_) = fromMaybe l <$> firstM doesFileExist locs - check [] = error "internal" + where + inrepo d = Git.localGitDir r d + check locs@(l:_) = fromMaybe l <$> firstM doesFileExist locs + check [] = error "internal" {- The annex directory of a repository. -} gitAnnexDir :: Git.Repo -> FilePath @@ -204,8 +204,8 @@ gitAnnexAssistantDefaultDir = "annex" {- Checks a symlink target to see if it appears to point to annexed content. -} isLinkToAnnex :: FilePath -> Bool isLinkToAnnex s = ('/':d) `isInfixOf` s || d `isPrefixOf` s - where - d = ".git" objectDir + where + d = ".git" objectDir {- Converts a key into a filename fragment without any directory. 
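{- Editor's note, not part of the patch: the size comparisons in limitSize
 - above are made on Maybe Integer, so a key whose size is unknown
 - (keySize == Nothing) sorts below every known size under the derived Ord
 - instance. Concretely, as the comparison is written above: -}
sizeDemo :: Bool
sizeDemo = and
    [ Just 200 > Just (100 :: Integer)      -- largerthan=100b matches a 200 byte key
    , not (Nothing > Just (100 :: Integer)) -- a key of unknown size never matches largerthan
    , Nothing < Just (100 :: Integer)       -- but it does match smallerthan
    ]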
- @@ -232,8 +232,8 @@ keyFile key = replace "/" "%" $ replace ":" "&c" $ -} keyPath :: Key -> Hasher -> FilePath keyPath key hasher = hasher key f f - where - f = keyFile key + where + f = keyFile key {- All possibile locations to store a key using different directory hashes. -} keyPaths :: Key -> [FilePath] @@ -249,7 +249,8 @@ fileKey file = file2key $ {- for quickcheck -} prop_idempotent_fileKey :: String -> Bool prop_idempotent_fileKey s = Just k == fileKey (keyFile k) - where k = stubKey { keyName = s, keyBackendName = "test" } + where + k = stubKey { keyName = s, keyBackendName = "test" } {- Two different directory hashes may be used. The mixed case hash - came first, and is fine, except for the problem of case-strict @@ -262,14 +263,14 @@ annexHashes = [hashDirLower, hashDirMixed] hashDirMixed :: Hasher hashDirMixed k = addTrailingPathSeparator $ take 2 dir drop 2 dir - where - dir = take 4 $ display_32bits_as_dir =<< [a,b,c,d] - ABCD (a,b,c,d) = md5 $ md5FilePath $ key2file k + where + dir = take 4 $ display_32bits_as_dir =<< [a,b,c,d] + ABCD (a,b,c,d) = md5 $ md5FilePath $ key2file k hashDirLower :: Hasher hashDirLower k = addTrailingPathSeparator $ take 3 dir drop 3 dir - where - dir = take 6 $ md5s $ md5FilePath $ key2file k + where + dir = take 6 $ md5s $ md5FilePath $ key2file k {- modified version of display_32bits_as_hex from Data.Hash.MD5 - Copyright (C) 2001 Ian Lynagh @@ -277,13 +278,13 @@ hashDirLower k = addTrailingPathSeparator $ take 3 dir drop 3 dir -} display_32bits_as_dir :: Word32 -> String display_32bits_as_dir w = trim $ swap_pairs cs - where - -- Need 32 characters to use. To avoid inaverdently making - -- a real word, use letters that appear less frequently. - chars = ['0'..'9'] ++ "zqjxkmvwgpfZQJXKMVWGPF" - cs = map (\x -> getc $ (shiftR w (6*x)) .&. 31) [0..7] - getc n = chars !! fromIntegral n - swap_pairs (x1:x2:xs) = x2:x1:swap_pairs xs - swap_pairs _ = [] - -- Last 2 will always be 00, so omit. - trim = take 6 + where + -- Need 32 characters to use. To avoid inaverdently making + -- a real word, use letters that appear less frequently. + chars = ['0'..'9'] ++ "zqjxkmvwgpfZQJXKMVWGPF" + cs = map (\x -> getc $ (shiftR w (6*x)) .&. 31) [0..7] + getc n = chars !! fromIntegral n + swap_pairs (x1:x2:xs) = x2:x1:swap_pairs xs + swap_pairs _ = [] + -- Last 2 will always be 00, so omit. + trim = take 6 diff --git a/Logs/Group.hs b/Logs/Group.hs index 9fd7486..a069edc 100644 --- a/Logs/Group.hs +++ b/Logs/Group.hs @@ -6,10 +6,12 @@ -} module Logs.Group ( + groupLog, groupChange, groupSet, lookupGroups, groupMap, + groupMapLoad, getStandardGroup, ) where @@ -47,25 +49,25 @@ groupChange NoUUID _ = error "unknown UUID; cannot modify" groupSet :: UUID -> S.Set Group -> Annex () groupSet u g = groupChange u (const g) -{- Read the groupLog into a map. The map is cached for speed. -} +{- The map is cached for speed. -} groupMap :: Annex GroupMap -groupMap = do - cached <- Annex.getState Annex.groupmap - case cached of - Just m -> return m - Nothing -> do - m <- makeGroupMap . simpleMap . - parseLog (Just . S.fromList . words) <$> - Annex.Branch.get groupLog - Annex.changeState $ \s -> s { Annex.groupmap = Just m } - return m +groupMap = maybe groupMapLoad return =<< Annex.getState Annex.groupmap + +{- Loads the map, updating the cache. -} +groupMapLoad :: Annex GroupMap +groupMapLoad = do + m <- makeGroupMap . simpleMap . + parseLog (Just . S.fromList . 
words) <$> + Annex.Branch.get groupLog + Annex.changeState $ \s -> s { Annex.groupmap = Just m } + return m makeGroupMap :: M.Map UUID (S.Set Group) -> GroupMap makeGroupMap byuuid = GroupMap byuuid bygroup - where - bygroup = M.fromListWith S.union $ - concat $ map explode $ M.toList byuuid - explode (u, s) = map (\g -> (g, S.singleton u)) (S.toList s) + where + bygroup = M.fromListWith S.union $ + concat $ map explode $ M.toList byuuid + explode (u, s) = map (\g -> (g, S.singleton u)) (S.toList s) {- If a repository is in exactly one standard group, returns it. -} getStandardGroup :: S.Set Group -> Maybe StandardGroup diff --git a/Logs/Location.hs b/Logs/Location.hs index e27ece5..4273710 100644 --- a/Logs/Location.hs +++ b/Logs/Location.hs @@ -47,13 +47,13 @@ loggedKeys = mapMaybe (logFileKey . takeFileName) <$> Annex.Branch.files - they are present for the specified repository. -} loggedKeysFor :: UUID -> Annex [Key] loggedKeysFor u = filterM isthere =<< loggedKeys - where - {- This should run strictly to avoid the filterM - - building many thunks containing keyLocations data. -} - isthere k = do - us <- loggedLocations k - let !there = u `elem` us - return there + where + {- This should run strictly to avoid the filterM + - building many thunks containing keyLocations data. -} + isthere k = do + us <- loggedLocations k + let !there = u `elem` us + return there {- The filename of the log file for a given key. -} logFile :: Key -> String @@ -64,5 +64,5 @@ logFileKey :: FilePath -> Maybe Key logFileKey file | ext == ".log" = fileKey base | otherwise = Nothing - where - (base, ext) = splitAt (length file - 4) file + where + (base, ext) = splitAt (length file - 4) file diff --git a/Logs/PreferredContent.hs b/Logs/PreferredContent.hs index 049d6b8..ddcc2ac 100644 --- a/Logs/PreferredContent.hs +++ b/Logs/PreferredContent.hs @@ -6,9 +6,11 @@ -} module Logs.PreferredContent ( + preferredContentLog, preferredContentSet, isPreferredContent, preferredContentMap, + preferredContentMapLoad, preferredContentMapRaw, checkPreferredContentExpression, setStandardGroup, @@ -46,29 +48,34 @@ preferredContentSet NoUUID _ = error "unknown UUID; cannot modify" {- Checks if a file is preferred content for the specified repository - (or the current repository if none is specified). -} -isPreferredContent :: Maybe UUID -> AssumeNotPresent -> TopFilePath -> Annex Bool +isPreferredContent :: Maybe UUID -> AssumeNotPresent -> FilePath -> Annex Bool isPreferredContent mu notpresent file = do + matchfile <- getTopFilePath <$> inRepo (toTopFilePath file) + let fi = Annex.FileInfo + { Annex.matchFile = matchfile + , Annex.relFile = file + } u <- maybe getUUID return mu m <- preferredContentMap case M.lookup u m of Nothing -> return True - Just matcher -> - Utility.Matcher.matchMrun matcher $ \a -> - a notpresent (getTopFilePath file) + Just matcher -> Utility.Matcher.matchMrun matcher $ \a -> + a notpresent fi -{- Read the preferredContentLog into a map. The map is cached for speed. -} +{- The map is cached for speed. -} preferredContentMap :: Annex Annex.PreferredContentMap -preferredContentMap = do +preferredContentMap = maybe preferredContentMapLoad return + =<< Annex.getState Annex.preferredcontentmap + +{- Loads the map, updating the cache. -} +preferredContentMapLoad :: Annex Annex.PreferredContentMap +preferredContentMapLoad = do groupmap <- groupMap - cached <- Annex.getState Annex.preferredcontentmap - case cached of - Just m -> return m - Nothing -> do - m <- simpleMap - . parseLogWithUUID ((Just .) . 
makeMatcher groupmap) - <$> Annex.Branch.get preferredContentLog - Annex.changeState $ \s -> s { Annex.preferredcontentmap = Just m } - return m + m <- simpleMap + . parseLogWithUUID ((Just .) . makeMatcher groupmap) + <$> Annex.Branch.get preferredContentLog + Annex.changeState $ \s -> s { Annex.preferredcontentmap = Just m } + return m preferredContentMapRaw :: Annex (M.Map UUID String) preferredContentMapRaw = simpleMap . parseLog Just @@ -83,8 +90,8 @@ makeMatcher groupmap u s | s == "standard" = standardMatcher groupmap u | null (lefts tokens) = Utility.Matcher.generate $ rights tokens | otherwise = matchAll - where - tokens = map (parseToken groupmap) (tokenizeMatcher s) + where + tokens = map (parseToken (Just u) groupmap) (tokenizeMatcher s) {- Standard matchers are pre-defined for some groups. If none is defined, - or a repository is in multiple groups with standard matchers, match all. -} @@ -99,35 +106,35 @@ matchAll = Utility.Matcher.generate [] checkPreferredContentExpression :: String -> Maybe String checkPreferredContentExpression s | s == "standard" = Nothing - | otherwise = case lefts $ map (parseToken emptyGroupMap) (tokenizeMatcher s) of + | otherwise = case lefts $ map (parseToken Nothing emptyGroupMap) (tokenizeMatcher s) of [] -> Nothing l -> Just $ unwords $ map ("Parse failure: " ++) l -parseToken :: GroupMap -> String -> Either String (Utility.Matcher.Token MatchFiles) -parseToken groupmap t +parseToken :: (Maybe UUID) -> GroupMap -> String -> Either String (Utility.Matcher.Token MatchFiles) +parseToken mu groupmap t | any (== t) Utility.Matcher.tokens = Right $ Utility.Matcher.token t - | otherwise = maybe (Left $ "near " ++ show t) use $ M.lookup k m - where - (k, v) = separate (== '=') t - m = M.fromList + | t == "present" = use $ limitPresent mu + | otherwise = maybe (Left $ "near " ++ show t) use $ M.lookup k $ + M.fromList [ ("include", limitInclude) , ("exclude", limitExclude) - , ("in", limitIn) , ("copies", limitCopies) , ("inbackend", limitInBackend) , ("largerthan", limitSize (>)) , ("smallerthan", limitSize (<)) , ("inallgroup", limitInAllGroup groupmap) ] - use a = Utility.Matcher.Operation <$> a v + where + (k, v) = separate (== '=') t + use a = Utility.Matcher.Operation <$> a v {- This is really dumb tokenization; there's no support for quoted values. - Open and close parens are always treated as standalone tokens; - otherwise tokens must be separated by whitespace. -} tokenizeMatcher :: String -> [String] tokenizeMatcher = filter (not . null ) . concatMap splitparens . words - where - splitparens = segmentDelim (`elem` "()") + where + splitparens = segmentDelim (`elem` "()") {- Puts a UUID in a standard group, and sets its preferred content to use - the standard expression for that group, unless something is already set. -} diff --git a/Logs/Presence.hs b/Logs/Presence.hs index e75e1e4..ce5dd57 100644 --- a/Logs/Presence.hs +++ b/Logs/Presence.hs @@ -53,23 +53,23 @@ readLog = parseLog <$$> Annex.Branch.get {- Parses a log file. Unparseable lines are ignored. -} parseLog :: String -> [LogLine] parseLog = mapMaybe (parseline . words) . 
lines - where - parseline (a:b:c:_) = do - d <- parseTime defaultTimeLocale "%s%Qs" a - s <- parsestatus b - Just $ LogLine (utcTimeToPOSIXSeconds d) s c - parseline _ = Nothing - parsestatus "1" = Just InfoPresent - parsestatus "0" = Just InfoMissing - parsestatus _ = Nothing + where + parseline (a:b:c:_) = do + d <- parseTime defaultTimeLocale "%s%Qs" a + s <- parsestatus b + Just $ LogLine (utcTimeToPOSIXSeconds d) s c + parseline _ = Nothing + parsestatus "1" = Just InfoPresent + parsestatus "0" = Just InfoMissing + parsestatus _ = Nothing {- Generates a log file. -} showLog :: [LogLine] -> String showLog = unlines . map genline - where - genline (LogLine d s i) = unwords [show d, genstatus s, i] - genstatus InfoPresent = "1" - genstatus InfoMissing = "0" + where + genline (LogLine d s i) = unwords [show d, genstatus s, i] + genstatus InfoPresent = "1" + genstatus InfoMissing = "0" {- Generates a new LogLine with the current date. -} logNow :: LogStatus -> String -> Annex LogLine @@ -102,7 +102,7 @@ mapLog :: LogLine -> LogMap -> LogMap mapLog l m | better = M.insert i l m | otherwise = m - where - better = maybe True newer $ M.lookup i m - newer l' = date l' <= date l - i = info l + where + better = maybe True newer $ M.lookup i m + newer l' = date l' <= date l + i = info l diff --git a/Logs/Remote.hs b/Logs/Remote.hs index b75573a..3348059 100644 --- a/Logs/Remote.hs +++ b/Logs/Remote.hs @@ -6,6 +6,7 @@ -} module Logs.Remote ( + remoteLog, readRemoteLog, configSet, keyValToConfig, @@ -47,40 +48,40 @@ showConfig = unwords . configToKeyVal {- Given Strings like "key=value", generates a RemoteConfig. -} keyValToConfig :: [String] -> RemoteConfig keyValToConfig ws = M.fromList $ map (/=/) ws - where - (/=/) s = (k, v) - where - k = takeWhile (/= '=') s - v = configUnEscape $ drop (1 + length k) s + where + (/=/) s = (k, v) + where + k = takeWhile (/= '=') s + v = configUnEscape $ drop (1 + length k) s configToKeyVal :: M.Map String String -> [String] configToKeyVal m = map toword $ sort $ M.toList m - where - toword (k, v) = k ++ "=" ++ configEscape v + where + toword (k, v) = k ++ "=" ++ configEscape v configEscape :: String -> String configEscape = concatMap escape - where - escape c - | isSpace c || c `elem` "&" = "&" ++ show (ord c) ++ ";" - | otherwise = [c] + where + escape c + | isSpace c || c `elem` "&" = "&" ++ show (ord c) ++ ";" + | otherwise = [c] configUnEscape :: String -> String configUnEscape = unescape - where - unescape [] = [] - unescape (c:rest) - | c == '&' = entity rest - | otherwise = c : unescape rest - entity s - | not (null num) && ";" `isPrefixOf` r = - chr (Prelude.read num) : unescape rest - | otherwise = - '&' : unescape s - where - num = takeWhile isNumber s - r = drop (length num) s - rest = drop 1 r + where + unescape [] = [] + unescape (c:rest) + | c == '&' = entity rest + | otherwise = c : unescape rest + entity s + | not (null num) && ";" `isPrefixOf` r = + chr (Prelude.read num) : unescape rest + | otherwise = + '&' : unescape s + where + num = takeWhile isNumber s + r = drop (length num) s + rest = drop 1 r {- for quickcheck -} prop_idempotent_configEscape :: String -> Bool diff --git a/Logs/Transfer.hs b/Logs/Transfer.hs index 3b68eee..0135f32 100644 --- a/Logs/Transfer.hs +++ b/Logs/Transfer.hs @@ -96,6 +96,9 @@ download u key file shouldretry a = runTransfer (Transfer Download u key) file s - - If the transfer action returns False, the transfer info is - left in the failedTransferDir. 
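{- Editor's note, not part of the patch: concrete examples of the key=value
 - escaping above, written as if next to the definitions in Logs/Remote.hs.
 - Spaces and '&' are the only characters rewritten, as "&<decimal>;", which
 - keeps the whitespace-separated remote log parseable;
 - prop_idempotent_configEscape already checks the round trip. -}
configEscapeDemo :: Bool
configEscapeDemo = and
    [ configEscape "foo bar" == "foo&32;bar"
    , configEscape "a&b" == "a&38;b"
    , configUnEscape "foo&32;bar" == "foo bar"
    ]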
+ - + - An upload can be run from a read-only filesystem, and in this case + - no transfer information or lock file is used. -} runTransfer :: Transfer -> Maybe FilePath -> RetryDecider -> (MeterUpdate -> Annex Bool) -> Annex Bool runTransfer t file shouldretry a = do @@ -106,42 +109,42 @@ runTransfer t file shouldretry a = do bracketIO (prep tfile mode info) (cleanup tfile) (a meter) unless ok $ failed info return ok - where - prep tfile mode info = do - fd <- openFd (transferLockFile tfile) ReadWrite (Just mode) - defaultFileFlags { trunc = True } - locked <- catchMaybeIO $ - setLock fd (WriteLock, AbsoluteSeek, 0, 0) - when (locked == Nothing) $ - error $ "transfer already in progress" - writeTransferInfoFile info tfile - return fd - cleanup tfile fd = do - void $ tryIO $ removeFile tfile - void $ tryIO $ removeFile $ transferLockFile tfile - closeFd fd - failed info = do - failedtfile <- fromRepo $ failedTransferFile t - createAnnexDirectory $ takeDirectory failedtfile - liftIO $ writeTransferInfoFile info failedtfile - retry oldinfo metervar run = do - v <- tryAnnex run - case v of - Right b -> return b - Left _ -> do - b <- getbytescomplete metervar - let newinfo = oldinfo { bytesComplete = Just b } - if shouldretry oldinfo newinfo - then retry newinfo metervar run - else return False - getbytescomplete metervar - | transferDirection t == Upload = - liftIO $ readMVar metervar - | otherwise = do - f <- fromRepo $ gitAnnexTmpLocation (transferKey t) - liftIO $ catchDefaultIO 0 $ - fromIntegral . fileSize - <$> getFileStatus f + where + prep tfile mode info = catchMaybeIO $ do + fd <- openFd (transferLockFile tfile) ReadWrite (Just mode) + defaultFileFlags { trunc = True } + locked <- catchMaybeIO $ + setLock fd (WriteLock, AbsoluteSeek, 0, 0) + when (locked == Nothing) $ + error $ "transfer already in progress" + writeTransferInfoFile info tfile + return fd + cleanup _ Nothing = noop + cleanup tfile (Just fd) = do + void $ tryIO $ removeFile tfile + void $ tryIO $ removeFile $ transferLockFile tfile + closeFd fd + failed info = do + failedtfile <- fromRepo $ failedTransferFile t + createAnnexDirectory $ takeDirectory failedtfile + liftIO $ writeTransferInfoFile info failedtfile + retry oldinfo metervar run = do + v <- tryAnnex run + case v of + Right b -> return b + Left _ -> do + b <- getbytescomplete metervar + let newinfo = oldinfo { bytesComplete = Just b } + if shouldretry oldinfo newinfo + then retry newinfo metervar run + else return False + getbytescomplete metervar + | transferDirection t == Upload = + liftIO $ readMVar metervar + | otherwise = do + f <- fromRepo $ gitAnnexTmpLocation (transferKey t) + liftIO $ catchDefaultIO 0 $ + fromIntegral . fileSize <$> getFileStatus f {- Generates a callback that can be called as transfer progresses to update - the transfer info file. Also returns the file it'll be updating, and a @@ -149,23 +152,23 @@ runTransfer t file shouldretry a = do mkProgressUpdater :: Transfer -> TransferInfo -> Annex (MeterUpdate, FilePath, MVar Integer) mkProgressUpdater t info = do tfile <- fromRepo $ transferFile t - createAnnexDirectory $ takeDirectory tfile + _ <- tryAnnex $ createAnnexDirectory $ takeDirectory tfile mvar <- liftIO $ newMVar 0 return (liftIO . 
updater tfile mvar, tfile, mvar) - where - updater tfile mvar bytes = modifyMVar_ mvar $ \oldbytes -> do - if (bytes - oldbytes >= mindelta) - then do - let info' = info { bytesComplete = Just bytes } - writeTransferInfoFile info' tfile - return bytes - else return oldbytes - {- The minimum change in bytesComplete that is worth - - updating a transfer info file for is 1% of the total - - keySize, rounded down. -} - mindelta = case keySize (transferKey t) of - Just sz -> sz `div` 100 - Nothing -> 100 * 1024 -- arbitrarily, 100 kb + where + updater tfile mvar bytes = modifyMVar_ mvar $ \oldbytes -> do + if (bytes - oldbytes >= mindelta) + then do + let info' = info { bytesComplete = Just bytes } + _ <- tryIO $ writeTransferInfoFile info' tfile + return bytes + else return oldbytes + {- The minimum change in bytesComplete that is worth + - updating a transfer info file for is 1% of the total + - keySize, rounded down. -} + mindelta = case keySize (transferKey t) of + Just sz -> sz `div` 100 + Nothing -> 100 * 1024 -- arbitrarily, 100 kb startTransferInfo :: Maybe FilePath -> IO TransferInfo startTransferInfo file = TransferInfo @@ -202,25 +205,23 @@ getTransfers = do infos <- mapM checkTransfer transfers return $ map (\(t, Just i) -> (t, i)) $ filter running $ zip transfers infos - where - findfiles = liftIO . mapM dirContentsRecursive - =<< mapM (fromRepo . transferDir) - [Download, Upload] - running (_, i) = isJust i + where + findfiles = liftIO . mapM dirContentsRecursive + =<< mapM (fromRepo . transferDir) [Download, Upload] + running (_, i) = isJust i {- Gets failed transfers for a given remote UUID. -} getFailedTransfers :: UUID -> Annex [(Transfer, TransferInfo)] getFailedTransfers u = catMaybes <$> (liftIO . getpairs =<< concat <$> findfiles) - where - getpairs = mapM $ \f -> do - let mt = parseTransferFile f - mi <- readTransferInfoFile Nothing f - return $ case (mt, mi) of - (Just t, Just i) -> Just (t, i) - _ -> Nothing - findfiles = liftIO . mapM dirContentsRecursive - =<< mapM (fromRepo . failedTransferDir u) - [Download, Upload] + where + getpairs = mapM $ \f -> do + let mt = parseTransferFile f + mi <- readTransferInfoFile Nothing f + return $ case (mt, mi) of + (Just t, Just i) -> Just (t, i) + _ -> Nothing + findfiles = liftIO . mapM dirContentsRecursive + =<< mapM (fromRepo . failedTransferDir u) [Download, Upload] removeFailedTransfer :: Transfer -> Annex () removeFailedTransfer t = do @@ -253,8 +254,8 @@ parseTransferFile file <*> pure (toUUID u) <*> fileKey key _ -> Nothing - where - bits = splitDirectories file + where + bits = splitDirectories file writeTransferInfoFile :: TransferInfo -> FilePath -> IO () writeTransferInfoFile info tfile = do @@ -291,16 +292,16 @@ readTransferInfo mpid s = TransferInfo <*> bytes <*> pure (if null filename then Nothing else Just filename) <*> pure False - where - (firstline, filename) = separate (== '\n') s - bits = split " " firstline - numbits = length bits - time = if numbits > 0 - then Just <$> parsePOSIXTime (bits !! 0) - else pure Nothing - bytes = if numbits > 1 - then Just <$> readish (bits !! 
1) - else pure Nothing + where + (firstline, filename) = separate (== '\n') s + bits = split " " firstline + numbits = length bits + time = if numbits > 0 + then Just <$> parsePOSIXTime =<< headMaybe bits + else pure Nothing -- not failure + bytes = if numbits > 1 + then Just <$> readish =<< headMaybe (drop 1 bits) + else pure Nothing -- not failure parsePOSIXTime :: String -> Maybe POSIXTime parsePOSIXTime s = utcTimeToPOSIXSeconds diff --git a/Logs/Trust.hs b/Logs/Trust.hs index 1a29f8c..e5322e0 100644 --- a/Logs/Trust.hs +++ b/Logs/Trust.hs @@ -6,11 +6,14 @@ -} module Logs.Trust ( + trustLog, TrustLevel(..), trustGet, trustSet, trustPartition, + trustExclude, lookupTrust, + trustMapLoad, trustMapRaw, ) where @@ -65,27 +68,32 @@ trustPartition level ls candidates <- trustGet level return $ partition (`elem` candidates) ls -{- Read the trustLog into a map, overriding with any - - values from forcetrust or the git config. The map is cached for speed. -} +{- Filters UUIDs to those not matching a TrustLevel. -} +trustExclude :: TrustLevel -> [UUID] -> Annex ([UUID]) +trustExclude level ls = snd <$> trustPartition level ls + +{- trustLog in a map, overridden with any values from forcetrust or + - the git config. The map is cached for speed. -} trustMap :: Annex TrustMap -trustMap = do - cached <- Annex.getState Annex.trustmap - case cached of - Just m -> return m - Nothing -> do - overrides <- Annex.getState Annex.forcetrust - logged <- trustMapRaw - configured <- M.fromList . catMaybes - <$> (mapM configuredtrust =<< remoteList) - let m = M.union overrides $ M.union configured logged - Annex.changeState $ \s -> s { Annex.trustmap = Just m } - return m - where - configuredtrust r = - maybe Nothing (\l -> Just (Types.Remote.uuid r, l)) <$> - maybe Nothing readTrustLevel - <$> getTrustLevel (Types.Remote.repo r) +trustMap = maybe trustMapLoad return =<< Annex.getState Annex.trustmap + +{- Loads the map, updating the cache, -} +trustMapLoad :: Annex TrustMap +trustMapLoad = do + overrides <- Annex.getState Annex.forcetrust + logged <- trustMapRaw + configured <- M.fromList . catMaybes + <$> (mapM configuredtrust =<< remoteList) + let m = M.union overrides $ M.union configured logged + Annex.changeState $ \s -> s { Annex.trustmap = Just m } + return m + where + configuredtrust r = maybe Nothing (\l -> Just (Types.Remote.uuid r, l)) + <$> maybe Nothing readTrustLevel + <$> getTrustLevel (Types.Remote.repo r) +{- Does not include forcetrust or git config values, just those from the + - log file. -} trustMapRaw :: Annex TrustMap trustMapRaw = simpleMap . parseLog (Just . parseTrustLog) <$> Annex.Branch.get trustLog @@ -94,11 +102,11 @@ trustMapRaw = simpleMap . parseLog (Just . parseTrustLog) - trust status, which is why this defaults to Trusted. -} parseTrustLog :: String -> TrustLevel parseTrustLog s = maybe Trusted parse $ headMaybe $ words s - where - parse "1" = Trusted - parse "0" = UnTrusted - parse "X" = DeadTrusted - parse _ = SemiTrusted + where + parse "1" = Trusted + parse "0" = UnTrusted + parse "X" = DeadTrusted + parse _ = SemiTrusted showTrustLog :: TrustLevel -> String showTrustLog Trusted = "1" diff --git a/Logs/UUID.hs b/Logs/UUID.hs index d825e11..2f24a38 100644 --- a/Logs/UUID.hs +++ b/Logs/UUID.hs @@ -8,34 +8,38 @@ - - uuid.log stores a list of known uuids, and their descriptions. - - - Copyright 2010-2011 Joey Hess + - Copyright 2010-2012 Joey Hess - - Licensed under the GNU GPL version 3 or higher. 
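{- Editor's note, not part of the patch: the trust log values handled by
 - parseTrustLog/showTrustLog above, as concrete cases (written as if next to
 - the definitions in Logs/Trust.hs). A blank value defaults to Trusted,
 - matching the comment above about older logs. -}
parseTrustLogDemo :: Bool
parseTrustLogDemo = and
    [ parseTrustLog "1" == Trusted
    , parseTrustLog "0" == UnTrusted
    , parseTrustLog "X" == DeadTrusted
    , parseTrustLog "" == Trusted       -- no value present
    , parseTrustLog "?" == SemiTrusted  -- anything else is semitrusted
    ]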
-} module Logs.UUID ( + uuidLog, describeUUID, recordUUID, - uuidMap + uuidMap, + uuidMapLoad ) where import qualified Data.Map as M import Data.Time.Clock.POSIX +import Types.UUID import Common.Annex +import qualified Annex import qualified Annex.Branch import Logs.UUIDBased import qualified Annex.UUID {- Filename of uuid.log. -} -logfile :: FilePath -logfile = "uuid.log" +uuidLog :: FilePath +uuidLog = "uuid.log" {- Records a description for a uuid in the log. -} describeUUID :: UUID -> String -> Annex () describeUUID uuid desc = do ts <- liftIO getPOSIXTime - Annex.Branch.change logfile $ + Annex.Branch.change uuidLog $ showLog id . changeLog ts uuid desc . fixBadUUID . parseLog Just {- Temporarily here to fix badly formatted uuid logs generated by @@ -49,41 +53,47 @@ describeUUID uuid desc = do -} fixBadUUID :: Log String -> Log String fixBadUUID = M.fromList . map fixup . M.toList - where - fixup (k, v) - | isbad = (fixeduuid, LogEntry (Date $ newertime v) fixedvalue) - | otherwise = (k, v) - where - kuuid = fromUUID k - isbad = not (isuuid kuuid) && isuuid lastword - ws = words $ value v - lastword = Prelude.last ws - fixeduuid = toUUID lastword - fixedvalue = unwords $ kuuid: Prelude.init ws - -- For the fixed line to take precidence, it should be - -- slightly newer, but only slightly. - newertime (LogEntry (Date d) _) = d + minimumPOSIXTimeSlice - newertime (LogEntry Unknown _) = minimumPOSIXTimeSlice - minimumPOSIXTimeSlice = 0.000001 - isuuid s = length s == 36 && length (split "-" s) == 5 + where + fixup (k, v) + | isbad = (fixeduuid, LogEntry (Date $ newertime v) fixedvalue) + | otherwise = (k, v) + where + kuuid = fromUUID k + isbad = not (isuuid kuuid) && isuuid lastword + ws = words $ value v + lastword = Prelude.last ws + fixeduuid = toUUID lastword + fixedvalue = unwords $ kuuid: Prelude.init ws + -- For the fixed line to take precidence, it should be + -- slightly newer, but only slightly. + newertime (LogEntry (Date d) _) = d + minimumPOSIXTimeSlice + newertime (LogEntry Unknown _) = minimumPOSIXTimeSlice + minimumPOSIXTimeSlice = 0.000001 + isuuid s = length s == 36 && length (split "-" s) == 5 {- Records the uuid in the log, if it's not already there. -} recordUUID :: UUID -> Annex () recordUUID u = go . M.lookup u =<< uuidMap - where - go (Just "") = set - go Nothing = set - go _ = noop - set = describeUUID u "" + where + go (Just "") = set + go Nothing = set + go _ = noop + set = describeUUID u "" + +{- The map is cached for speed. -} +uuidMap :: Annex UUIDMap +uuidMap = maybe uuidMapLoad return =<< Annex.getState Annex.uuidmap {- Read the uuidLog into a simple Map. - - The UUID of the current repository is included explicitly, since - it may not have been described and so otherwise would not appear. -} -uuidMap :: Annex (M.Map UUID String) -uuidMap = do - m <- (simpleMap . parseLog Just) <$> Annex.Branch.get logfile +uuidMapLoad :: Annex UUIDMap +uuidMapLoad = do + m <- (simpleMap . parseLog Just) <$> Annex.Branch.get uuidLog u <- Annex.UUID.getUUID - return $ M.insertWith' preferold u "" m - where - preferold = flip const + let m' = M.insertWith' preferold u "" m + Annex.changeState $ \s -> s { Annex.uuidmap = Just m' } + return m' + where + preferold = flip const diff --git a/Logs/UUIDBased.hs b/Logs/UUIDBased.hs index 674ac21..c1901ee 100644 --- a/Logs/UUIDBased.hs +++ b/Logs/UUIDBased.hs @@ -50,36 +50,36 @@ tskey = "timestamp=" showLog :: (a -> String) -> Log a -> String showLog shower = unlines . map showpair . 
M.toList - where - showpair (k, LogEntry (Date p) v) = - unwords [fromUUID k, shower v, tskey ++ show p] - showpair (k, LogEntry Unknown v) = - unwords [fromUUID k, shower v] + where + showpair (k, LogEntry (Date p) v) = + unwords [fromUUID k, shower v, tskey ++ show p] + showpair (k, LogEntry Unknown v) = + unwords [fromUUID k, shower v] parseLog :: (String -> Maybe a) -> String -> Log a parseLog = parseLogWithUUID . const parseLogWithUUID :: (UUID -> String -> Maybe a) -> String -> Log a parseLogWithUUID parser = M.fromListWith best . mapMaybe parse . lines - where - parse line - | null ws = Nothing - | otherwise = parser u (unwords info) >>= makepair - where - makepair v = Just (u, LogEntry ts v) - ws = words line - u = toUUID $ Prelude.head ws - t = Prelude.last ws - ts - | tskey `isPrefixOf` t = - pdate $ drop 1 $ dropWhile (/= '=') t - | otherwise = Unknown - info - | ts == Unknown = drop 1 ws - | otherwise = drop 1 $ beginning ws - pdate s = case parseTime defaultTimeLocale "%s%Qs" s of - Nothing -> Unknown - Just d -> Date $ utcTimeToPOSIXSeconds d + where + parse line + | null ws = Nothing + | otherwise = parser u (unwords info) >>= makepair + where + makepair v = Just (u, LogEntry ts v) + ws = words line + u = toUUID $ Prelude.head ws + t = Prelude.last ws + ts + | tskey `isPrefixOf` t = + pdate $ drop 1 $ dropWhile (/= '=') t + | otherwise = Unknown + info + | ts == Unknown = drop 1 ws + | otherwise = drop 1 $ beginning ws + pdate s = case parseTime defaultTimeLocale "%s%Qs" s of + Nothing -> Unknown + Just d -> Date $ utcTimeToPOSIXSeconds d changeLog :: POSIXTime -> UUID -> a -> Log a -> Log a changeLog t u v = M.insert u $ LogEntry (Date t) v @@ -106,9 +106,9 @@ prop_TimeStamp_sane = Unknown < Date 1 prop_addLog_sane :: Bool prop_addLog_sane = newWins && newestWins - where - newWins = addLog (UUID "foo") (LogEntry (Date 1) "new") l == l2 - newestWins = addLog (UUID "foo") (LogEntry (Date 1) "newest") l2 /= l2 + where + newWins = addLog (UUID "foo") (LogEntry (Date 1) "new") l == l2 + newestWins = addLog (UUID "foo") (LogEntry (Date 1) "newest") l2 /= l2 - l = M.fromList [(UUID "foo", LogEntry (Date 0) "old")] - l2 = M.fromList [(UUID "foo", LogEntry (Date 1) "new")] + l = M.fromList [(UUID "foo", LogEntry (Date 0) "old")] + l2 = M.fromList [(UUID "foo", LogEntry (Date 1) "new")] diff --git a/Logs/Unused.hs b/Logs/Unused.hs index 522c523..9f1278d 100644 --- a/Logs/Unused.hs +++ b/Logs/Unused.hs @@ -35,13 +35,12 @@ readUnusedLog prefix = do <$> liftIO (readFile f) , return M.empty ) - where - parse line = - case (readish tag, file2key rest) of - (Just num, Just key) -> Just (num, key) - _ -> Nothing - where - (tag, rest) = separate (== ' ') line + where + parse line = case (readish tag, file2key rest) of + (Just num, Just key) -> Just (num, key) + _ -> Nothing + where + (tag, rest) = separate (== ' ') line type UnusedMap = M.Map Int Key @@ -64,10 +63,10 @@ unusedSpec :: String -> [Int] unusedSpec spec | "-" `isInfixOf` spec = range $ separate (== '-') spec | otherwise = catMaybes [readish spec] - where - range (a, b) = case (readish a, readish b) of - (Just x, Just y) -> [x..y] - _ -> [] + where + range (a, b) = case (readish a, readish b) of + (Just x, Just y) -> [x..y] + _ -> [] {- Start action for unused content. Finds the number in the maps, and - calls either of 3 actions, depending on the type of unused file. 
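{- Editor's note, not part of the patch: what the unused-number specs parsed
 - by unusedSpec above expand to, written as if next to its definition in
 - Logs/Unused.hs. -}
unusedSpecDemo :: Bool
unusedSpecDemo = and
    [ unusedSpec "3" == [3]
    , unusedSpec "2-5" == [2,3,4,5]
    , unusedSpec "2-" == []   -- malformed ranges give no numbers
    , unusedSpec "x" == []
    ]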
-} @@ -81,11 +80,11 @@ startUnused message unused badunused tmpunused maps n = search , (unusedBadMap maps, badunused) , (unusedTmpMap maps, tmpunused) ] - where - search [] = stop - search ((m, a):rest) = - case M.lookup n m of - Nothing -> search rest - Just key -> do - showStart message (show n) - next $ a key + where + search [] = stop + search ((m, a):rest) = + case M.lookup n m of + Nothing -> search rest + Just key -> do + showStart message (show n) + next $ a key diff --git a/Logs/Web.hs b/Logs/Web.hs index 534bd53..c2a4deb 100644 --- a/Logs/Web.hs +++ b/Logs/Web.hs @@ -37,13 +37,13 @@ oldurlLogs key = {- Gets all urls that a key might be available from. -} getUrls :: Key -> Annex [URLString] getUrls key = go $ urlLog key : oldurlLogs key - where - go [] = return [] - go (l:ls) = do - us <- currentLog l - if null us - then go ls - else return us + where + go [] = return [] + go (l:ls) = do + us <- currentLog l + if null us + then go ls + else return us {- Records a change in an url for a key. -} setUrl :: Key -> URLString -> LogStatus -> Annex () diff --git a/Makefile b/Makefile index 1292bba..a98949e 100644 --- a/Makefile +++ b/Makefile @@ -1,13 +1,13 @@ CFLAGS=-Wall GIT_ANNEX_TMP_BUILD_DIR?=tmp -IGNORE=-ignore-package monads-fd -ignore-package monads-tf -BASEFLAGS=-Wall $(IGNORE) -outputdir $(GIT_ANNEX_TMP_BUILD_DIR) -IUtility +BASEFLAGS=-Wall -outputdir $(GIT_ANNEX_TMP_BUILD_DIR) -IUtility # If you get build failures due to missing haskell libraries, # you can turn off some of these features. # # If you're using an old version of yesod, enable -DWITH_OLD_YESOD -FEATURES?=$(GIT_ANNEX_LOCAL_FEATURES) -DWITH_ASSISTANT -DWITH_S3 -DWITH_WEBAPP -DWITH_PAIRING +# Or with an old version of the uri library, enable -DWITH_OLD_URI +FEATURES?=$(GIT_ANNEX_LOCAL_FEATURES) -DWITH_ASSISTANT -DWITH_S3 -DWITH_WEBAPP -DWITH_PAIRING -DWITH_XMPP -DWITH_DNS bins=git-annex mans=git-annex.1 git-annex-shell.1 @@ -16,10 +16,14 @@ all=$(bins) $(mans) docs OS:=$(shell uname | sed 's/[-_].*//') ifeq ($(OS),Linux) -OPTFLAGS?=-DWITH_INOTIFY +OPTFLAGS?=-DWITH_INOTIFY -DWITH_DBUS clibs=Utility/libdiskfree.o Utility/libmounts.o THREADFLAGS=$(shell if test -e `ghc --print-libdir`/libHSrts_thr.a; then printf -- -threaded; fi) else +ifeq ($(OS),SunOS) +# Solaris is not supported by the assistant or watch command. 
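{- Editor's sketch, not part of the patch: the "first non-empty log wins"
 - fallthrough used by getUrls above, written out as a standalone monadic
 - helper so the shape is visible without the Annex types. -}
firstNonNull :: Monad m => [m [a]] -> m [a]
firstNonNull [] = return []
firstNonNull (a:as) = do
    xs <- a
    if null xs
        then firstNonNull as
        else return xs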
+FEATURES:=$(shell echo $(FEATURES) | sed -e 's/-DWITH_ASSISTANT//' -e 's/-DWITH_WEBAPP//') +else # BSD system THREADFLAGS=-threaded OPTFLAGS?=-DWITH_KQUEUE @@ -32,6 +36,7 @@ CFLAGS=-Wall -m32 endif endif endif +endif ALLFLAGS = $(BASEFLAGS) $(FEATURES) $(OPTFLAGS) $(THREADFLAGS) @@ -139,11 +144,10 @@ hackage: sdist @cabal upload dist/*.tar.gz THIRDPARTY_BINS=git curl lsof xargs rsync uuid wget gpg \ - sha1sum sha224sum sha256sum sha384sum sha512sum + sha1sum sha224sum sha256sum sha384sum sha512sum cp LINUXSTANDALONE_DEST=$(GIT_ANNEX_TMP_BUILD_DIR)/git-annex.linux linuxstandalone: - $(MAKE) clean GIT_ANNEX_LOCAL_FEATURES="$(GIT_ANNEX_LOCAL_FEATURES) -DWITH_OLD_SSH" $(MAKE) git-annex rm -rf "$(LINUXSTANDALONE_DEST)" @@ -182,7 +186,6 @@ linuxstandalone: OSXAPP_DEST=$(GIT_ANNEX_TMP_BUILD_DIR)/build-dmg/git-annex.app OSXAPP_BASE=$(OSXAPP_DEST)/Contents/MacOS osxapp: - $(MAKE) clean GIT_ANNEX_LOCAL_FEATURES="$(GIT_ANNEX_LOCAL_FEATURES) -DWITH_OLD_SSH" $(MAKE) git-annex rm -rf "$(OSXAPP_DEST)" @@ -218,4 +221,8 @@ osxapp: rm -f tmp/git-annex.dmg.bz2 bzip2 tmp/git-annex.dmg +# used by ./ghci +getflags: + @echo $(ALLFLAGS) $(clibs) + .PHONY: $(bins) test install diff --git a/Messages.hs b/Messages.hs index d8d84d1..f3cd9fc 100644 --- a/Messages.hs +++ b/Messages.hs @@ -65,29 +65,29 @@ showProgress = handle q $ - The action is passed a callback to use to update the meter. -} metered :: (Maybe MeterUpdate) -> Key -> (MeterUpdate -> Annex a) -> Annex a metered combinemeterupdate key a = withOutputType $ go (keySize key) - where - go (Just size) NormalOutput = do - progress <- liftIO $ newProgress "" size - meter <- liftIO $ newMeter progress "B" 25 (renderNums binaryOpts 1) - showOutput - liftIO $ displayMeter stdout meter - r <- a $ \n -> liftIO $ do - incrP progress n - displayMeter stdout meter - maybe noop (\m -> m n) combinemeterupdate - liftIO $ clearMeter stdout meter - return r - go _ _ = a (const noop) + where + go (Just size) NormalOutput = do + progress <- liftIO $ newProgress "" size + meter <- liftIO $ newMeter progress "B" 25 (renderNums binaryOpts 1) + showOutput + liftIO $ displayMeter stdout meter + r <- a $ \n -> liftIO $ do + incrP progress n + displayMeter stdout meter + maybe noop (\m -> m n) combinemeterupdate + liftIO $ clearMeter stdout meter + return r + go _ _ = a (const noop) showSideAction :: String -> Annex () showSideAction m = Annex.getState Annex.output >>= go - where - go (MessageState v StartBlock) = do - p - Annex.changeState $ \s -> s { Annex.output = MessageState v InBlock } - go (MessageState _ InBlock) = return () - go _ = p - p = handle q $ putStrLn $ "(" ++ m ++ "...)" + where + go (MessageState v StartBlock) = do + p + Annex.changeState $ \s -> s { Annex.output = MessageState v InBlock } + go (MessageState _ InBlock) = return () + go _ = p + p = handle q $ putStrLn $ "(" ++ m ++ "...)" showStoringStateAction :: Annex () showStoringStateAction = showSideAction "Recording state in git" @@ -106,8 +106,8 @@ doSideAction' b a = do o <- Annex.getState Annex.output set $ o { sideActionBlock = b } set o `after` a - where - set o = Annex.changeState $ \s -> s { Annex.output = o } + where + set o = Annex.changeState $ \s -> s { Annex.output = o } showOutput :: Annex () showOutput = handle q $ @@ -125,10 +125,10 @@ showEndFail = showEndResult False showEndResult :: Bool -> Annex () showEndResult ok = handle (JSON.end ok) $ putStrLn msg - where - msg - | ok = "ok" - | otherwise = "failed" + where + msg + | ok = "ok" + | otherwise = "failed" showErr :: (Show a) => a -> 
Annex () showErr e = warning' $ "git-annex: " ++ show e @@ -153,9 +153,9 @@ maybeShowJSON v = handle (JSON.add v) q {- Shows a complete JSON value, only when in json mode. -} showFullJSON :: JSON a => [(String, a)] -> Annex Bool showFullJSON v = withOutputType $ liftIO . go - where - go JSONOutput = JSON.complete v >> return True - go _ = return False + where + go JSONOutput = JSON.complete v >> return True + go _ = return False {- Performs an action that outputs nonstandard/customized output, and - in JSON mode wraps its output in JSON.start and JSON.end, so it's @@ -184,10 +184,10 @@ setupConsole = do handle :: IO () -> IO () -> Annex () handle json normal = withOutputType go - where - go NormalOutput = liftIO normal - go QuietOutput = q - go JSONOutput = liftIO $ flushed json + where + go NormalOutput = liftIO normal + go QuietOutput = q + go JSONOutput = liftIO $ flushed json q :: Monad m => m () q = noop diff --git a/Messages/JSON.hs b/Messages/JSON.hs index f7a031e..e262192 100644 --- a/Messages/JSON.hs +++ b/Messages/JSON.hs @@ -20,9 +20,9 @@ import qualified Utility.JSONStream as Stream start :: String -> Maybe String -> IO () start command file = putStr $ Stream.start $ ("command", command) : filepart file - where - filepart Nothing = [] - filepart (Just f) = [("file", f)] + where + filepart Nothing = [] + filepart (Just f) = [("file", f)] end :: Bool -> IO () end b = putStr $ Stream.add [("success", b)] ++ Stream.end diff --git a/Option.hs b/Option.hs index ff70fb6..1475aaf 100644 --- a/Option.hs +++ b/Option.hs @@ -46,18 +46,18 @@ common = , Option ['b'] ["backend"] (ReqArg setforcebackend paramName) "specify key-value backend to use" ] - where - setforce v = Annex.changeState $ \s -> s { Annex.force = v } - setfast v = Annex.changeState $ \s -> s { Annex.fast = v } - setauto v = Annex.changeState $ \s -> s { Annex.auto = v } - setforcebackend v = Annex.changeState $ \s -> s { Annex.forcebackend = Just v } - setdebug = liftIO $ do - s <- simpledebug - updateGlobalLogger rootLoggerName - (setLevel DEBUG . setHandlers [s]) - simpledebug = setFormatter - <$> streamHandler stderr DEBUG - <*> pure (simpleLogFormatter "[$time] $msg") + where + setforce v = Annex.changeState $ \s -> s { Annex.force = v } + setfast v = Annex.changeState $ \s -> s { Annex.fast = v } + setauto v = Annex.changeState $ \s -> s { Annex.auto = v } + setforcebackend v = Annex.changeState $ \s -> s { Annex.forcebackend = Just v } + setdebug = liftIO $ do + s <- simpledebug + updateGlobalLogger rootLoggerName + (setLevel DEBUG . setHandlers [s]) + simpledebug = setFormatter + <$> streamHandler stderr DEBUG + <*> pure (simpleLogFormatter "[$time] $msg") matcher :: [Option] matcher = @@ -67,9 +67,9 @@ matcher = , shortopt "(" "open group of options" , shortopt ")" "close group of options" ] - where - longopt o = Option [] [o] $ NoArg $ addToken o - shortopt o = Option o [] $ NoArg $ addToken o + where + longopt o = Option [] [o] $ NoArg $ addToken o + shortopt o = Option o [] $ NoArg $ addToken o {- An option that sets a flag. -} flag :: String -> String -> String -> Option diff --git a/Remote.hs b/Remote.hs index e1ff9e7..721b64e 100644 --- a/Remote.hs +++ b/Remote.hs @@ -80,10 +80,10 @@ byName (Just n) = either error Just <$> byName' n byName' :: String -> Annex (Either String Remote) byName' "" = return $ Left "no remote specified" byName' n = handle . 
filter matching <$> remoteList - where - handle [] = Left $ "there is no available git remote named \"" ++ n ++ "\"" - handle match = Right $ Prelude.head match - matching r = n == name r || toUUID n == uuid r + where + handle [] = Left $ "there is no available git remote named \"" ++ n ++ "\"" + handle match = Right $ Prelude.head match + matching r = n == name r || toUUID n == uuid r {- Looks up a remote by name (or by UUID, or even by description), - and returns its UUID. Finds even remotes that are not configured in @@ -93,17 +93,17 @@ nameToUUID "." = getUUID -- special case for current repo nameToUUID "here" = getUUID nameToUUID "" = error "no remote specified" nameToUUID n = byName' n >>= go - where - go (Right r) = return $ uuid r - go (Left e) = fromMaybe (error e) <$> bydescription - bydescription = do - m <- uuidMap - case M.lookup n $ transform swap m of - Just u -> return $ Just u - Nothing -> return $ byuuid m - byuuid m = M.lookup (toUUID n) $ transform double m - transform a = M.fromList . map a . M.toList - double (a, _) = (a, a) + where + go (Right r) = return $ uuid r + go (Left e) = fromMaybe (error e) <$> bydescription + bydescription = do + m <- uuidMap + case M.lookup n $ transform swap m of + Just u -> return $ Just u + Nothing -> return $ byuuid m + byuuid m = M.lookup (toUUID n) $ transform double m + transform a = M.fromList . map a . M.toList + double (a, _) = (a, a) {- Pretty-prints a list of UUIDs of remotes, for human display. - @@ -115,23 +115,23 @@ prettyPrintUUIDs desc uuids = do m <- uuidDescriptions maybeShowJSON [(desc, map (jsonify m hereu) uuids)] return $ unwords $ map (\u -> "\t" ++ prettify m hereu u ++ "\n") uuids - where - finddescription m u = M.findWithDefault "" u m - prettify m hereu u - | not (null d) = fromUUID u ++ " -- " ++ d - | otherwise = fromUUID u - where - ishere = hereu == u - n = finddescription m u - d - | null n && ishere = "here" - | ishere = addName n "here" - | otherwise = n - jsonify m hereu u = toJSObject - [ ("uuid", toJSON $ fromUUID u) - , ("description", toJSON $ finddescription m u) - , ("here", toJSON $ hereu == u) - ] + where + finddescription m u = M.findWithDefault "" u m + prettify m hereu u + | not (null d) = fromUUID u ++ " -- " ++ d + | otherwise = fromUUID u + where + ishere = hereu == u + n = finddescription m u + d + | null n && ishere = "here" + | ishere = addName n "here" + | otherwise = n + jsonify m hereu u = toJSObject + [ ("uuid", toJSON $ fromUUID u) + , ("description", toJSON $ finddescription m u) + , ("here", toJSON $ hereu == u) + ] {- List of remote names and/or descriptions, for human display. -} prettyListUUIDs :: [UUID] -> Annex [String] @@ -139,13 +139,13 @@ prettyListUUIDs uuids = do hereu <- getUUID m <- uuidDescriptions return $ map (\u -> prettify m hereu u) uuids - where - finddescription m u = M.findWithDefault "" u m - prettify m hereu u - | u == hereu = addName n "here" - | otherwise = n - where - n = finddescription m u + where + finddescription m u = M.findWithDefault "" u m + prettify m hereu u + | u == hereu = addName n "here" + | otherwise = n + where + n = finddescription m u {- Gets the git repo associated with a UUID. - There's no associated remote when this is the UUID of the local repo. -} @@ -169,7 +169,7 @@ remotesWithoutUUID rs us = filter (\r -> uuid r `notElem` us) rs {- List of repository UUIDs that the location log indicates may have a key. - Dead repositories are excluded. 
-} keyLocations :: Key -> Annex [UUID] -keyLocations key = snd <$> (trustPartition DeadTrusted =<< loggedLocations key) +keyLocations key = trustExclude DeadTrusted =<< loggedLocations key {- Cost ordered lists of remotes that the location log indicates - may have a key. @@ -213,12 +213,12 @@ showLocations key exclude = do ppuuidswanted <- Remote.prettyPrintUUIDs "wanted" uuidswanted ppuuidsskipped <- Remote.prettyPrintUUIDs "skipped" uuidsskipped showLongNote $ message ppuuidswanted ppuuidsskipped - where - filteruuids l x = filter (`notElem` x) l - message [] [] = "No other repository is known to contain the file." - message rs [] = "Try making some of these repositories available:\n" ++ rs - message [] us = "Also these untrusted repositories may contain the file:\n" ++ us - message rs us = message rs [] ++ message [] us + where + filteruuids l x = filter (`notElem` x) l + message [] [] = "No other repository is known to contain the file." + message rs [] = "Try making some of these repositories available:\n" ++ rs + message [] us = "Also these untrusted repositories may contain the file:\n" ++ us + message rs us = message rs [] ++ message [] us showTriedRemotes :: [Remote] -> Annex () showTriedRemotes [] = noop @@ -242,6 +242,6 @@ logStatus remote key = logChange key (uuid remote) {- Orders remotes by cost, with ones with the lowest cost grouped together. -} byCost :: [Remote] -> [[Remote]] byCost = map snd . sort . M.toList . costmap - where - costmap = M.fromListWith (++) . map costpair - costpair r = (cost r, [r]) + where + costmap = M.fromListWith (++) . map costpair + costpair r = (cost r, [r]) diff --git a/Remote/Bup.hs b/Remote/Bup.hs index 2249f5b..f5bcc4f 100644 --- a/Remote/Bup.hs +++ b/Remote/Bup.hs @@ -26,6 +26,7 @@ import Remote.Helper.Encryptable import Crypto import Data.ByteString.Lazy.UTF8 (fromString) import Data.Digest.Pure.SHA +import Utility.UserInfo type BupRepo = String @@ -105,24 +106,24 @@ pipeBup params inh outh = do ExitSuccess -> return True _ -> return False -bupSplitParams :: Git.Repo -> BupRepo -> Key -> CommandParam -> Annex [CommandParam] +bupSplitParams :: Git.Repo -> BupRepo -> Key -> [CommandParam] -> Annex [CommandParam] bupSplitParams r buprepo k src = do o <- getRemoteConfig r "bup-split-options" "" let os = map Param $ words o showOutput -- make way for bup output return $ bupParams "split" buprepo - (os ++ [Param "-n", Param (bupRef k), src]) + (os ++ [Param "-n", Param (bupRef k)] ++ src) store :: Git.Repo -> BupRepo -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool store r buprepo k _f _p = do src <- inRepo $ gitAnnexLocation k - params <- bupSplitParams r buprepo k (File src) + params <- bupSplitParams r buprepo k [File src] liftIO $ boolSystem "bup" params storeEncrypted :: Git.Repo -> BupRepo -> (Cipher, Key) -> Key -> MeterUpdate -> Annex Bool storeEncrypted r buprepo (cipher, enck) k _p = do src <- inRepo $ gitAnnexLocation k - params <- bupSplitParams r buprepo enck (Param "-") + params <- bupSplitParams r buprepo enck [] liftIO $ catchBoolIO $ withEncryptedHandle cipher (L.readFile src) $ \h -> pipeBup params (Just h) Nothing @@ -142,9 +143,9 @@ retrieveEncrypted buprepo (cipher, enck) _ f = liftIO $ catchBoolIO $ withHandle StdoutHandle createProcessSuccess p $ \h -> do withDecryptedContent cipher (L.hGetContents h) $ L.writeFile f return True - where - params = bupParams "join" buprepo [Param $ bupRef enck] - p = proc "bup" $ toCommand params + where + params = bupParams "join" buprepo [Param $ bupRef enck] + p = proc "bup" $ 
toCommand params remove :: Key -> Annex Bool remove _ = do @@ -163,10 +164,11 @@ checkPresent r bupr k return $ Right ok | otherwise = liftIO $ catchMsgIO $ boolSystem "git" $ Git.Command.gitCommandLine params bupr - where - params = - [ Params "show-ref --quiet --verify" - , Param $ "refs/heads/" ++ bupRef k] + where + params = + [ Params "show-ref --quiet --verify" + , Param $ "refs/heads/" ++ bupRef k + ] {- Store UUID in the annex.uuid setting of the bup repository. -} storeBupUUID :: UUID -> BupRepo -> Annex () @@ -184,8 +186,8 @@ storeBupUUID u buprepo = do when (olduuid == "") $ Git.Command.run "config" [Param "annex.uuid", Param v] r' - where - v = fromUUID u + where + v = fromUUID u onBupRemote :: Git.Repo -> (FilePath -> [CommandParam] -> IO a) -> FilePath -> [CommandParam] -> Annex a onBupRemote r a command params = do @@ -226,17 +228,17 @@ bup2GitRemote r then Git.Construct.fromAbsPath r else error "please specify an absolute path" | otherwise = Git.Construct.fromUrl $ "ssh://" ++ host ++ slash dir - where - bits = split ":" r - host = Prelude.head bits - dir = join ":" $ drop 1 bits - -- "host:~user/dir" is not supported specially by bup; - -- "host:dir" is relative to the home directory; - -- "host:" goes in ~/.bup - slash d - | null d = "/~/.bup" - | "/" `isPrefixOf` d = d - | otherwise = "/~/" ++ d + where + bits = split ":" r + host = Prelude.head bits + dir = join ":" $ drop 1 bits + -- "host:~user/dir" is not supported specially by bup; + -- "host:dir" is relative to the home directory; + -- "host:" goes in ~/.bup + slash d + | null d = "/~/.bup" + | "/" `isPrefixOf` d = d + | otherwise = "/~/" ++ d {- Converts a key into a git ref name, which bup-split -n will use to point - to it. -} @@ -244,8 +246,8 @@ bupRef :: Key -> String bupRef k | Git.Ref.legal True shown = shown | otherwise = "git-annex-" ++ showDigest (sha256 (fromString shown)) - where - shown = key2file k + where + shown = key2file k bupLocal :: BupRepo -> Bool bupLocal = notElem ':' diff --git a/Remote/Directory.hs b/Remote/Directory.hs index bac5318..006638a 100644 --- a/Remote/Directory.hs +++ b/Remote/Directory.hs @@ -57,7 +57,6 @@ gen r u c = do readonly = False, remotetype = remote } - where type ChunkSize = Maybe Int64 @@ -101,25 +100,25 @@ chunkCount f = f ++ ".chunkcount" withCheckedFiles :: (FilePath -> IO Bool) -> ChunkSize -> FilePath -> Key -> ([FilePath] -> IO Bool) -> IO Bool withCheckedFiles _ _ [] _ _ = return False withCheckedFiles check Nothing d k a = go $ locations d k - where - go [] = return False - go (f:fs) = ifM (check f) ( a [f] , go fs ) + where + go [] = return False + go (f:fs) = ifM (check f) ( a [f] , go fs ) withCheckedFiles check (Just _) d k a = go $ locations d k - where - go [] = return False - go (f:fs) = do - let chunkcount = chunkCount f - use <- check chunkcount - if use - then do - count <- readcount chunkcount - let chunks = take count $ chunkStream f - ifM (all id <$> mapM check chunks) - ( a chunks , return False ) - else go fs - readcount f = fromMaybe (error $ "cannot parse " ++ f) - . (readish :: String -> Maybe Int) - <$> readFile f + where + go [] = return False + go (f:fs) = do + let chunkcount = chunkCount f + ifM (check chunkcount) + ( do + count <- readcount chunkcount + let chunks = take count $ chunkStream f + ifM (all id <$> mapM check chunks) + ( a chunks , return False ) + , go fs + ) + readcount f = fromMaybe (error $ "cannot parse " ++ f) + . 
(readish :: String -> Maybe Int) + <$> readFile f withStoredFiles :: ChunkSize -> FilePath -> Key -> ([FilePath] -> IO Bool) -> IO Bool withStoredFiles = withCheckedFiles doesFileExist @@ -170,39 +169,39 @@ storeSplit' _ _ _ [] c = return $ reverse c storeSplit' meterupdate chunksize (d:dests) bs c = do bs' <- E.bracket (openFile d WriteMode) hClose (feed chunksize bs) storeSplit' meterupdate chunksize dests bs' (d:c) - where - feed _ [] _ = return [] - feed sz (l:ls) h = do - let s = fromIntegral $ S.length l - if s <= sz - then do - S.hPut h l - meterupdate $ toInteger s - feed (sz - s) ls h - else return (l:ls) + where + feed _ [] _ = return [] + feed sz (l:ls) h = do + let s = fromIntegral $ S.length l + if s <= sz + then do + S.hPut h l + meterupdate $ toInteger s + feed (sz - s) ls h + else return (l:ls) {- Write a L.ByteString to a file, updating a progress meter - after each chunk of the L.ByteString, typically every 64 kb or so. -} meteredWriteFile :: MeterUpdate -> FilePath -> L.ByteString -> IO () meteredWriteFile meterupdate dest b = meteredWriteFile' meterupdate dest (L.toChunks b) feeder - where - feeder chunks = return ([], chunks) + where + feeder chunks = return ([], chunks) {- Writes a series of S.ByteString chunks to a file, updating a progress - meter after each chunk. The feeder is called to get more chunks. -} meteredWriteFile' :: MeterUpdate -> FilePath -> s -> (s -> IO (s, [S.ByteString])) -> IO () meteredWriteFile' meterupdate dest startstate feeder = E.bracket (openFile dest WriteMode) hClose (feed startstate []) - where - feed state [] h = do - (state', cs) <- feeder state - unless (null cs) $ - feed state' cs h - feed state (c:cs) h = do - S.hPut h c - meterupdate $ toInteger $ S.length c - feed state cs h + where + feed state [] h = do + (state', cs) <- feeder state + unless (null cs) $ + feed state' cs h + feed state (c:cs) h = do + S.hPut h c + meterupdate $ toInteger $ S.length c + feed state cs h {- Generates a list of destinations to write to in order to store a key. - When chunksize is specified, this list will be a list of chunks. @@ -213,36 +212,36 @@ meteredWriteFile' meterupdate dest startstate feeder = -} storeHelper :: FilePath -> ChunkSize -> Key -> ([FilePath] -> IO [FilePath]) -> Annex Bool storeHelper d chunksize key a = prep <&&> check <&&> go - where - desttemplate = Prelude.head $ locations d key - dir = parentDir desttemplate - tmpdests = case chunksize of - Nothing -> [desttemplate ++ tmpprefix] - Just _ -> map (++ tmpprefix) (chunkStream desttemplate) - tmpprefix = ".tmp" - detmpprefix f = take (length f - tmpprefixlen) f - tmpprefixlen = length tmpprefix - prep = liftIO $ catchBoolIO $ do - createDirectoryIfMissing True dir - allowWrite dir - return True - {- The size is not exactly known when encrypting the key; - - this assumes that at least the size of the key is - - needed as free space. 
-} - check = checkDiskSpace (Just dir) key 0 - go = liftIO $ catchBoolIO $ do - stored <- a tmpdests - forM_ stored $ \f -> do - let dest = detmpprefix f - renameFile f dest - preventWrite dest - when (chunksize /= Nothing) $ do - let chunkcount = chunkCount desttemplate - _ <- tryIO $ allowWrite chunkcount - writeFile chunkcount (show $ length stored) - preventWrite chunkcount - preventWrite dir - return (not $ null stored) + where + desttemplate = Prelude.head $ locations d key + dir = parentDir desttemplate + tmpdests = case chunksize of + Nothing -> [desttemplate ++ tmpprefix] + Just _ -> map (++ tmpprefix) (chunkStream desttemplate) + tmpprefix = ".tmp" + detmpprefix f = take (length f - tmpprefixlen) f + tmpprefixlen = length tmpprefix + prep = liftIO $ catchBoolIO $ do + createDirectoryIfMissing True dir + allowWrite dir + return True + {- The size is not exactly known when encrypting the key; + - this assumes that at least the size of the key is + - needed as free space. -} + check = checkDiskSpace (Just dir) key 0 + go = liftIO $ catchBoolIO $ do + stored <- a tmpdests + forM_ stored $ \f -> do + let dest = detmpprefix f + renameFile f dest + preventWrite dest + when (chunksize /= Nothing) $ do + let chunkcount = chunkCount desttemplate + _ <- tryIO $ allowWrite chunkcount + writeFile chunkcount (show $ length stored) + preventWrite chunkcount + preventWrite dir + return (not $ null stored) retrieve :: FilePath -> ChunkSize -> Key -> AssociatedFile -> FilePath -> Annex Bool retrieve d chunksize k _ f = metered Nothing k $ \meterupdate -> @@ -250,11 +249,11 @@ retrieve d chunksize k _ f = metered Nothing k $ \meterupdate -> catchBoolIO $ do meteredWriteFile' meterupdate f files feeder return True - where - feeder [] = return ([], []) - feeder (x:xs) = do - chunks <- L.toChunks <$> L.readFile x - return (xs, chunks) + where + feeder [] = return ([], []) + feeder (x:xs) = do + chunks <- L.toChunks <$> L.readFile x + return (xs, chunks) retrieveEncrypted :: FilePath -> ChunkSize -> (Cipher, Key) -> Key -> FilePath -> Annex Bool retrieveEncrypted d chunksize (cipher, enck) k f = metered Nothing k $ \meterupdate -> @@ -267,20 +266,20 @@ retrieveEncrypted d chunksize (cipher, enck) k f = metered Nothing k $ \meterupd retrieveCheap :: FilePath -> ChunkSize -> Key -> FilePath -> Annex Bool retrieveCheap _ (Just _) _ _ = return False -- no cheap retrieval for chunks retrieveCheap d _ k f = liftIO $ withStoredFiles Nothing d k go - where - go [file] = catchBoolIO $ createSymbolicLink file f >> return True - go _files = return False + where + go [file] = catchBoolIO $ createSymbolicLink file f >> return True + go _files = return False remove :: FilePath -> ChunkSize -> Key -> Annex Bool remove d chunksize k = liftIO $ withStoredFiles chunksize d k go - where - go = all id <$$> mapM removefile - removefile file = catchBoolIO $ do - let dir = parentDir file - allowWrite dir - removeFile file - _ <- tryIO $ removeDirectory dir - return True + where + go = all id <$$> mapM removefile + removefile file = catchBoolIO $ do + let dir = parentDir file + allowWrite dir + removeFile file + _ <- tryIO $ removeDirectory dir + return True checkPresent :: FilePath -> ChunkSize -> Key -> Annex (Either String Bool) checkPresent d chunksize k = liftIO $ catchMsgIO $ withStoredFiles chunksize d k $ diff --git a/Remote/Git.hs b/Remote/Git.hs index cc524fd..24dd9bf 100644 --- a/Remote/Git.hs +++ b/Remote/Git.hs @@ -39,6 +39,7 @@ import Types.Key import qualified Fields import Control.Concurrent +import 
Control.Concurrent.MSampleVar import System.Process (std_in, std_err) remote :: RemoteType @@ -54,15 +55,15 @@ list = do c <- fromRepo Git.config rs <- mapM (tweakurl c) =<< fromRepo Git.remotes mapM configRead rs - where - annexurl n = "remote." ++ n ++ ".annexurl" - tweakurl c r = do - let n = fromJust $ Git.remoteName r - case M.lookup (annexurl n) c of - Nothing -> return r - Just url -> inRepo $ \g -> - Git.Construct.remoteNamed n $ - Git.Construct.fromRemoteLocation url g + where + annexurl n = "remote." ++ n ++ ".annexurl" + tweakurl c r = do + let n = fromJust $ Git.remoteName r + case M.lookup (annexurl n) c of + Nothing -> return r + Just url -> inRepo $ \g -> + Git.Construct.remoteNamed n $ + Git.Construct.fromRemoteLocation url g {- It's assumed to be cheap to read the config of non-URL remotes, so this is - done each time git-annex is run in a way that uses remotes. @@ -84,28 +85,27 @@ repoCheap = not . Git.repoIsUrl gen :: Git.Repo -> UUID -> Maybe RemoteConfig -> Annex Remote gen r u _ = new <$> remoteCost r defcst - where - defcst = if repoCheap r then cheapRemoteCost else expensiveRemoteCost - new cst = Remote - { uuid = u - , cost = cst - , name = Git.repoDescribe r - , storeKey = copyToRemote r - , retrieveKeyFile = copyFromRemote r - , retrieveKeyFileCheap = copyFromRemoteCheap r - , removeKey = dropKey r - , hasKey = inAnnex r - , hasKeyCheap = repoCheap r - , whereisKey = Nothing - , config = Nothing - , localpath = if Git.repoIsLocal r || Git.repoIsLocalUnknown r - then Just $ Git.repoPath r - else Nothing - , repo = r - , readonly = Git.repoIsHttp r - , remotetype = remote - } - + where + defcst = if repoCheap r then cheapRemoteCost else expensiveRemoteCost + new cst = Remote + { uuid = u + , cost = cst + , name = Git.repoDescribe r + , storeKey = copyToRemote r + , retrieveKeyFile = copyFromRemote r + , retrieveKeyFileCheap = copyFromRemoteCheap r + , removeKey = dropKey r + , hasKey = inAnnex r + , hasKeyCheap = repoCheap r + , whereisKey = Nothing + , config = Nothing + , localpath = if Git.repoIsLocal r || Git.repoIsLocalUnknown r + then Just $ Git.repoPath r + else Nothing + , repo = r + , readonly = Git.repoIsHttp r + , remotetype = remote + } {- Checks relatively inexpensively if a repository is available for use. -} repoAvail :: Git.Repo -> Annex Bool @@ -148,40 +148,40 @@ tryGitConfigRead r | otherwise = store $ safely $ onLocal r $ do ensureInitialized Annex.getState Annex.repo - where - -- Reading config can fail due to IO error or - -- for other reasons; catch all possible exceptions. - safely a = either (const $ return r) return - =<< liftIO (try a :: IO (Either SomeException Git.Repo)) + where + -- Reading config can fail due to IO error or + -- for other reasons; catch all possible exceptions. 
+ safely a = either (const $ return r) return + =<< liftIO (try a :: IO (Either SomeException Git.Repo)) - pipedconfig cmd params = - withHandle StdoutHandle createProcessSuccess p $ - Git.Config.hRead r - where - p = proc cmd $ toCommand params + pipedconfig cmd params = + withHandle StdoutHandle createProcessSuccess p $ + Git.Config.hRead r + where + p = proc cmd $ toCommand params - pipedsshconfig cmd params = - liftIO (try (pipedconfig cmd params) :: IO (Either SomeException Git.Repo)) + pipedsshconfig cmd params = + liftIO (try (pipedconfig cmd params) :: IO (Either SomeException Git.Repo)) - geturlconfig headers = do - s <- Url.get (Git.repoLocation r ++ "/config") headers - withTempFile "git-annex.tmp" $ \tmpfile h -> do - hPutStr h s - hClose h - safely $ pipedconfig "git" [Param "config", Param "--null", Param "--list", Param "--file", File tmpfile] + geturlconfig headers = do + s <- Url.get (Git.repoLocation r ++ "/config") headers + withTempFile "git-annex.tmp" $ \tmpfile h -> do + hPutStr h s + hClose h + safely $ pipedconfig "git" [Param "config", Param "--null", Param "--list", Param "--file", File tmpfile] - store = observe $ \r' -> do - g <- gitRepo - let l = Git.remotes g - let g' = g { Git.remotes = exchange l r' } - Annex.changeState $ \s -> s { Annex.repo = g' } + store = observe $ \r' -> do + g <- gitRepo + let l = Git.remotes g + let g' = g { Git.remotes = exchange l r' } + Annex.changeState $ \s -> s { Annex.repo = g' } - exchange [] _ = [] - exchange (old:ls) new - | Git.remoteName old == Git.remoteName new = - new : exchange ls new - | otherwise = - old : exchange ls new + exchange [] _ = [] + exchange (old:ls) new + | Git.remoteName old == Git.remoteName new = + new : exchange ls new + | otherwise = + old : exchange ls new {- Checks if a given remote has the content for a key inAnnex. 
- If the remote cannot be accessed, or if it cannot determine @@ -192,32 +192,32 @@ inAnnex r key | Git.repoIsHttp r = checkhttp =<< getHttpHeaders | Git.repoIsUrl r = checkremote | otherwise = checklocal - where - checkhttp headers = liftIO $ go undefined $ keyUrls r key - where - go e [] = return $ Left e - go _ (u:us) = do - res <- catchMsgIO $ - Url.check u headers (keySize key) - case res of - Left e -> go e us - v -> return v - checkremote = do - showAction $ "checking " ++ Git.repoDescribe r - onRemote r (check, unknown) "inannex" [Param (key2file key)] [] - where - check c p = dispatch <$> safeSystem c p - dispatch ExitSuccess = Right True - dispatch (ExitFailure 1) = Right False - dispatch _ = unknown - checklocal = guardUsable r unknown $ dispatch <$> check - where - check = liftIO $ catchMsgIO $ onLocal r $ - Annex.Content.inAnnexSafe key - dispatch (Left e) = Left e - dispatch (Right (Just b)) = Right b - dispatch (Right Nothing) = unknown - unknown = Left $ "unable to check " ++ Git.repoDescribe r + where + checkhttp headers = liftIO $ go undefined $ keyUrls r key + where + go e [] = return $ Left e + go _ (u:us) = do + res <- catchMsgIO $ + Url.check u headers (keySize key) + case res of + Left e -> go e us + v -> return v + checkremote = do + showAction $ "checking " ++ Git.repoDescribe r + onRemote r (check, unknown) "inannex" [Param (key2file key)] [] + where + check c p = dispatch <$> safeSystem c p + dispatch ExitSuccess = Right True + dispatch (ExitFailure 1) = Right False + dispatch _ = unknown + checklocal = guardUsable r unknown $ dispatch <$> check + where + check = liftIO $ catchMsgIO $ onLocal r $ + Annex.Content.inAnnexSafe key + dispatch (Left e) = Left e + dispatch (Right (Just b)) = Right b + dispatch (Right Nothing) = unknown + unknown = Left $ "unable to check " ++ Git.repoDescribe r {- Runs an action on a local repository inexpensively, by making an annex - monad using that repository. -} @@ -232,8 +232,8 @@ onLocal r a = do keyUrls :: Git.Repo -> Key -> [String] keyUrls r key = map tourl (annexLocations key) - where - tourl l = Git.repoLocation r ++ "/" ++ l + where + tourl l = Git.repoLocation r ++ "/" ++ l dropKey :: Git.Repo -> Key -> Annex Bool dropKey r key @@ -270,44 +270,44 @@ copyFromRemote r key file dest =<< rsyncParamsRemote r True key dest file | Git.repoIsHttp r = Annex.Content.downloadUrl (keyUrls r key) dest | otherwise = error "copying from non-ssh, non-http repo not supported" - where - {- Feed local rsync's progress info back to the remote, - - by forking a feeder thread that runs - - git-annex-shell transferinfo at the same time - - git-annex-shell sendkey is running. - - - - Note that it actually waits for rsync to indicate - - progress before starting transferinfo, in order - - to ensure ssh connection caching works and reuses - - the connection set up for the sendkey. - - - - Also note that older git-annex-shell does not support - - transferinfo, so stderr is dropped and failure ignored. 
- -} - feedprogressback a = do - u <- getUUID - let fields = (Fields.remoteUUID, fromUUID u) - : maybe [] (\f -> [(Fields.associatedFile, f)]) file - Just (cmd, params) <- git_annex_shell r "transferinfo" - [Param $ key2file key] fields - v <- liftIO $ newEmptySampleVar - tid <- liftIO $ forkIO $ void $ tryIO $ do - bytes <- readSampleVar v - p <- createProcess $ - (proc cmd (toCommand params)) - { std_in = CreatePipe - , std_err = CreatePipe - } - hClose $ stderrHandle p - let h = stdinHandle p - let send b = do - hPutStrLn h $ show b - hFlush h - send bytes - forever $ - send =<< readSampleVar v - let feeder = writeSampleVar v - bracketIO noop (const $ tryIO $ killThread tid) (a feeder) + where + {- Feed local rsync's progress info back to the remote, + - by forking a feeder thread that runs + - git-annex-shell transferinfo at the same time + - git-annex-shell sendkey is running. + - + - Note that it actually waits for rsync to indicate + - progress before starting transferinfo, in order + - to ensure ssh connection caching works and reuses + - the connection set up for the sendkey. + - + - Also note that older git-annex-shell does not support + - transferinfo, so stderr is dropped and failure ignored. + -} + feedprogressback a = do + u <- getUUID + let fields = (Fields.remoteUUID, fromUUID u) + : maybe [] (\f -> [(Fields.associatedFile, f)]) file + Just (cmd, params) <- git_annex_shell r "transferinfo" + [Param $ key2file key] fields + v <- liftIO $ newEmptySV + tid <- liftIO $ forkIO $ void $ tryIO $ do + bytes <- readSV v + p <- createProcess $ + (proc cmd (toCommand params)) + { std_in = CreatePipe + , std_err = CreatePipe + } + hClose $ stderrHandle p + let h = stdinHandle p + let send b = do + hPutStrLn h $ show b + hFlush h + send bytes + forever $ + send =<< readSV v + let feeder = writeSV v + bracketIO noop (const $ tryIO $ killThread tid) (a feeder) copyFromRemoteCheap :: Git.Repo -> Key -> FilePath -> Annex Bool copyFromRemoteCheap r key file @@ -358,26 +358,26 @@ rsyncHelper callback params = do rsyncOrCopyFile :: [CommandParam] -> FilePath -> FilePath -> MeterUpdate -> Annex Bool rsyncOrCopyFile rsyncparams src dest p = ifM (sameDeviceIds src dest) (docopy, dorsync) - where - sameDeviceIds a b = (==) <$> (getDeviceId a) <*> (getDeviceId b) - getDeviceId f = deviceID <$> liftIO (getFileStatus $ parentDir f) - dorsync = rsyncHelper (Just p) $ - rsyncparams ++ [Param src, Param dest] - docopy = liftIO $ bracket - (forkIO $ watchfilesize 0) - (void . tryIO . killThread) - (const $ copyFileExternal src dest) - watchfilesize oldsz = do - threadDelay 500000 -- 0.5 seconds - v <- catchMaybeIO $ - fromIntegral . fileSize - <$> getFileStatus dest - case v of - Just sz - | sz /= oldsz -> do - p sz - watchfilesize sz - _ -> watchfilesize oldsz + where + sameDeviceIds a b = (==) <$> (getDeviceId a) <*> (getDeviceId b) + getDeviceId f = deviceID <$> liftIO (getFileStatus $ parentDir f) + dorsync = rsyncHelper (Just p) $ + rsyncparams ++ [Param src, Param dest] + docopy = liftIO $ bracket + (forkIO $ watchfilesize 0) + (void . tryIO . killThread) + (const $ copyFileExternal src dest) + watchfilesize oldsz = do + threadDelay 500000 -- 0.5 seconds + v <- catchMaybeIO $ + fromIntegral . fileSize + <$> getFileStatus dest + case v of + Just sz + | sz /= oldsz -> do + p sz + watchfilesize sz + _ -> watchfilesize oldsz {- Generates rsync parameters that ssh to the remote and asks it - to either receive or send the key's content. 
-} @@ -396,42 +396,43 @@ rsyncParamsRemote r sending key file afile = do if sending then return $ o ++ rsyncopts eparam dummy (File file) else return $ o ++ rsyncopts eparam (File file) dummy - where - rsyncopts ps source dest - | end ps == [dashdash] = ps ++ [source, dest] - | otherwise = ps ++ [dashdash, source, dest] - dashdash = Param "--" - -- The rsync shell parameter controls where rsync - -- goes, so the source/dest parameter can be a dummy value, - -- that just enables remote rsync mode. - -- For maximum compatability with some patched rsyncs, - -- the dummy value needs to still contain a hostname, - -- even though this hostname will never be used. - dummy = Param "dummy:" + where + rsyncopts ps source dest + | end ps == [dashdash] = ps ++ [source, dest] + | otherwise = ps ++ [dashdash, source, dest] + dashdash = Param "--" + {- The rsync shell parameter controls where rsync + - goes, so the source/dest parameter can be a dummy value, + - that just enables remote rsync mode. + - For maximum compatability with some patched rsyncs, + - the dummy value needs to still contain a hostname, + - even though this hostname will never be used. -} + dummy = Param "dummy:" rsyncParams :: Git.Repo -> Annex [CommandParam] rsyncParams r = do o <- getRemoteConfig r "rsync-options" "" return $ options ++ map Param (words o) - where - -- --inplace to resume partial files - options = [Params "-p --progress --inplace"] + where + -- --inplace to resume partial files + options = [Params "-p --progress --inplace"] commitOnCleanup :: Git.Repo -> Annex a -> Annex a commitOnCleanup r a = go `after` a - where - go = Annex.addCleanup (Git.repoLocation r) cleanup - cleanup - | not $ Git.repoIsUrl r = liftIO $ onLocal r $ - doQuietSideAction $ - Annex.Branch.commit "update" - | otherwise = void $ do - Just (shellcmd, shellparams) <- - git_annex_shell r "commit" [] [] - -- Throw away stderr, since the remote may not - -- have a new enough git-annex shell to - -- support committing. - let cmd = shellcmd ++ " " - ++ unwords (map shellEscape $ toCommand shellparams) - ++ ">/dev/null 2>/dev/null" - liftIO $ boolSystem "sh" [Param "-c", Param cmd] + where + go = Annex.addCleanup (Git.repoLocation r) cleanup + cleanup + | not $ Git.repoIsUrl r = liftIO $ onLocal r $ + doQuietSideAction $ + Annex.Branch.commit "update" + | otherwise = void $ do + Just (shellcmd, shellparams) <- + git_annex_shell r "commit" [] [] + + -- Throw away stderr, since the remote may not + -- have a new enough git-annex shell to + -- support committing. + liftIO $ catchMaybeIO $ do + withQuietOutput createProcessSuccess $ + proc shellcmd $ + toCommand shellparams diff --git a/Remote/Helper/Encryptable.hs b/Remote/Helper/Encryptable.hs index 8ed2fed..12c7d37 100644 --- a/Remote/Helper/Encryptable.hs +++ b/Remote/Helper/Encryptable.hs @@ -32,12 +32,12 @@ encryptionSetup c = case (M.lookup "encryption" c, extractCipher c) of (Just "shared", Nothing) -> use "encryption setup" $ genSharedCipher (Just keyid, Nothing) -> use "encryption setup" $ genEncryptedCipher keyid (Just keyid, Just v) -> use "encryption updated" $ updateEncryptedCipher keyid v - where - cannotchange = error "Cannot change encryption type of existing remote." - use m a = do - cipher <- liftIO a - showNote $ m ++ " " ++ describeCipher cipher - return $ M.delete "encryption" $ storeCipher c cipher + where + cannotchange = error "Cannot change encryption type of existing remote." 
+ use m a = do + cipher <- liftIO a + showNote $ m ++ " " ++ describeCipher cipher + return $ M.delete "encryption" $ storeCipher c cipher {- Modifies a Remote to support encryption. - @@ -58,35 +58,35 @@ encryptableRemote c storeKeyEncrypted retrieveKeyFileEncrypted r = hasKey = withkey $ hasKey r, cost = cost r + encryptedRemoteCostAdj } - where - store k f p = cip k >>= maybe - (storeKey r k f p) - (\enck -> storeKeyEncrypted enck k p) - retrieve k f d = cip k >>= maybe - (retrieveKeyFile r k f d) - (\enck -> retrieveKeyFileEncrypted enck k d) - retrieveCheap k d = cip k >>= maybe - (retrieveKeyFileCheap r k d) - (\_ -> return False) - withkey a k = cip k >>= maybe (a k) (a . snd) - cip = cipherKey c + where + store k f p = cip k >>= maybe + (storeKey r k f p) + (\enck -> storeKeyEncrypted enck k p) + retrieve k f d = cip k >>= maybe + (retrieveKeyFile r k f d) + (\enck -> retrieveKeyFileEncrypted enck k d) + retrieveCheap k d = cip k >>= maybe + (retrieveKeyFileCheap r k d) + (\_ -> return False) + withkey a k = cip k >>= maybe (a k) (a . snd) + cip = cipherKey c {- Gets encryption Cipher. The decrypted Ciphers are cached in the Annex - state. -} remoteCipher :: RemoteConfig -> Annex (Maybe Cipher) remoteCipher c = go $ extractCipher c - where - go Nothing = return Nothing - go (Just encipher) = do - cache <- Annex.getState Annex.ciphers - case M.lookup encipher cache of - Just cipher -> return $ Just cipher - Nothing -> decrypt encipher cache - decrypt encipher cache = do - showNote "gpg" - cipher <- liftIO $ decryptCipher encipher - Annex.changeState (\s -> s { Annex.ciphers = M.insert encipher cipher cache }) - return $ Just cipher + where + go Nothing = return Nothing + go (Just encipher) = do + cache <- Annex.getState Annex.ciphers + case M.lookup encipher cache of + Just cipher -> return $ Just cipher + Nothing -> decrypt encipher cache + decrypt encipher cache = do + showNote "gpg" + cipher <- liftIO $ decryptCipher encipher + Annex.changeState (\s -> s { Annex.ciphers = M.insert encipher cipher cache }) + return $ Just cipher {- Checks if there is a trusted (non-shared) cipher. -} isTrustedCipher :: RemoteConfig -> Bool @@ -97,16 +97,16 @@ isTrustedCipher c = cipherKey :: Maybe RemoteConfig -> Key -> Annex (Maybe (Cipher, Key)) cipherKey Nothing _ = return Nothing cipherKey (Just c) k = maybe Nothing encrypt <$> remoteCipher c - where - encrypt ciphertext = Just (ciphertext, encryptKey ciphertext k) + where + encrypt ciphertext = Just (ciphertext, encryptKey ciphertext k) {- Stores an StorableCipher in a remote's configuration. -} storeCipher :: RemoteConfig -> StorableCipher -> RemoteConfig storeCipher c (SharedCipher t) = M.insert "cipher" (toB64 t) c storeCipher c (EncryptedCipher t ks) = M.insert "cipher" (toB64 t) $ M.insert "cipherkeys" (showkeys ks) c - where - showkeys (KeyIds l) = join "," l + where + showkeys (KeyIds l) = join "," l {- Extracts an StorableCipher from a remote's configuration. -} extractCipher :: RemoteConfig -> Maybe StorableCipher @@ -115,5 +115,5 @@ extractCipher c = (Just t, Just ks) -> Just $ EncryptedCipher (fromB64 t) (readkeys ks) (Just t, Nothing) -> Just $ SharedCipher (fromB64 t) _ -> Nothing - where - readkeys = KeyIds . split "," + where + readkeys = KeyIds . 
split "," diff --git a/Remote/Helper/Hooks.hs b/Remote/Helper/Hooks.hs index eb788bc..91190d8 100644 --- a/Remote/Helper/Hooks.hs +++ b/Remote/Helper/Hooks.hs @@ -25,16 +25,16 @@ addHooks r = addHooks' r <$> lookupHook r "start" <*> lookupHook r "stop" addHooks' :: Remote -> Maybe String -> Maybe String -> Remote addHooks' r Nothing Nothing = r addHooks' r starthook stophook = r' - where - r' = r - { storeKey = \k f p -> wrapper $ storeKey r k f p - , retrieveKeyFile = \k f d -> wrapper $ retrieveKeyFile r k f d - , retrieveKeyFileCheap = \k f -> wrapper $ retrieveKeyFileCheap r k f - , removeKey = \k -> wrapper $ removeKey r k - , hasKey = \k -> wrapper $ hasKey r k - } - where - wrapper = runHooks r' starthook stophook + where + r' = r + { storeKey = \k f p -> wrapper $ storeKey r k f p + , retrieveKeyFile = \k f d -> wrapper $ retrieveKeyFile r k f d + , retrieveKeyFileCheap = \k f -> wrapper $ retrieveKeyFileCheap r k f + , removeKey = \k -> wrapper $ removeKey r k + , hasKey = \k -> wrapper $ hasKey r k + } + where + wrapper = runHooks r' starthook stophook runHooks :: Remote -> Maybe String -> Maybe String -> Annex a -> Annex a runHooks r starthook stophook a = do @@ -44,50 +44,49 @@ runHooks r starthook stophook a = do liftIO $ createDirectoryIfMissing True dir firstrun lck a - where - remoteid = show (uuid r) - run Nothing = noop - run (Just command) = void $ liftIO $ - boolSystem "sh" [Param "-c", Param command] - firstrun lck = do - -- Take a shared lock; This indicates that git-annex - -- is using the remote, and prevents other instances - -- of it from running the stophook. If another - -- instance is shutting down right now, this - -- will block waiting for its exclusive lock to clear. - lockFile lck + where + remoteid = show (uuid r) + run Nothing = noop + run (Just command) = void $ liftIO $ + boolSystem "sh" [Param "-c", Param command] + firstrun lck = do + -- Take a shared lock; This indicates that git-annex + -- is using the remote, and prevents other instances + -- of it from running the stophook. If another + -- instance is shutting down right now, this + -- will block waiting for its exclusive lock to clear. + lockFile lck - -- The starthook is run even if some other git-annex - -- is already running, and ran it before. - -- It would be difficult to use locking to ensure - -- it's only run once, and it's also possible for - -- git-annex to be interrupted before it can run the - -- stophook, in which case the starthook - -- would be run again by the next git-annex. - -- So, requiring idempotency is the right approach. - run starthook + -- The starthook is run even if some other git-annex + -- is already running, and ran it before. + -- It would be difficult to use locking to ensure + -- it's only run once, and it's also possible for + -- git-annex to be interrupted before it can run the + -- stophook, in which case the starthook + -- would be run again by the next git-annex. + -- So, requiring idempotency is the right approach. + run starthook - Annex.addCleanup (remoteid ++ "-stop-command") $ - runstop lck - runstop lck = do - -- Drop any shared lock we have, and take an - -- exclusive lock, without blocking. If the lock - -- succeeds, we're the only process using this remote, - -- so can stop it. 
- unlockFile lck - mode <- annexFileMode - fd <- liftIO $ noUmask mode $ - openFd lck ReadWrite (Just mode) defaultFileFlags - v <- liftIO $ tryIO $ - setLock fd (WriteLock, AbsoluteSeek, 0, 0) - case v of - Left _ -> noop - Right _ -> run stophook - liftIO $ closeFd fd + Annex.addCleanup (remoteid ++ "-stop-command") $ runstop lck + runstop lck = do + -- Drop any shared lock we have, and take an + -- exclusive lock, without blocking. If the lock + -- succeeds, we're the only process using this remote, + -- so can stop it. + unlockFile lck + mode <- annexFileMode + fd <- liftIO $ noUmask mode $ + openFd lck ReadWrite (Just mode) defaultFileFlags + v <- liftIO $ tryIO $ + setLock fd (WriteLock, AbsoluteSeek, 0, 0) + case v of + Left _ -> noop + Right _ -> run stophook + liftIO $ closeFd fd lookupHook :: Remote -> String -> Annex (Maybe String) lookupHook r n = go =<< getRemoteConfig (repo r) hookname "" - where - go "" = return Nothing - go command = return $ Just command - hookname = n ++ "-command" + where + go "" = return Nothing + go command = return $ Just command + hookname = n ++ "-command" diff --git a/Remote/Helper/Special.hs b/Remote/Helper/Special.hs index 3f6c9c1..f25ee8e 100644 --- a/Remote/Helper/Special.hs +++ b/Remote/Helper/Special.hs @@ -23,18 +23,18 @@ findSpecialRemotes :: String -> Annex [Git.Repo] findSpecialRemotes s = do m <- fromRepo Git.config liftIO $ mapM construct $ remotepairs m - where - remotepairs = M.toList . M.filterWithKey match - construct (k,_) = Git.Construct.remoteNamedFromKey k Git.Construct.fromUnknown - match k _ = startswith "remote." k && endswith (".annex-"++s) k + where + remotepairs = M.toList . M.filterWithKey match + construct (k,_) = Git.Construct.remoteNamedFromKey k Git.Construct.fromUnknown + match k _ = startswith "remote." k && endswith (".annex-"++s) k {- Sets up configuration for a special remote in .git/config. -} gitConfigSpecialRemote :: UUID -> RemoteConfig -> String -> String -> Annex () gitConfigSpecialRemote u c k v = do set ("annex-"++k) v set ("annex-uuid") (fromUUID u) - where - set a b = inRepo $ Git.Command.run "config" - [Param (configsetting a), Param b] - remotename = fromJust (M.lookup "name" c) - configsetting s = "remote." ++ remotename ++ "." ++ s + where + set a b = inRepo $ Git.Command.run "config" + [Param (configsetting a), Param b] + remotename = fromJust (M.lookup "name" c) + configsetting s = "remote." ++ remotename ++ "." ++ s diff --git a/Remote/Helper/Ssh.hs b/Remote/Helper/Ssh.hs index 4434bc6..b6da80e 100644 --- a/Remote/Helper/Ssh.hs +++ b/Remote/Helper/Ssh.hs @@ -1,6 +1,6 @@ {- git-annex remote access with ssh - - - Copyright 2011.2012 Joey Hess + - Copyright 2011,2012 Joey Hess - - Licensed under the GNU GPL version 3 or higher. 
-} @@ -34,22 +34,22 @@ git_annex_shell r command params fields sshparams <- sshToRepo r [Param $ sshcmd uuid ] return $ Just ("ssh", sshparams) | otherwise = return Nothing - where - dir = Git.repoPath r - shellcmd = "git-annex-shell" - shellopts = Param command : File dir : params - sshcmd uuid = unwords $ - shellcmd : map shellEscape (toCommand shellopts) ++ - uuidcheck uuid ++ - map shellEscape (toCommand fieldopts) - uuidcheck NoUUID = [] - uuidcheck (UUID u) = ["--uuid", u] - fieldopts - | null fields = [] - | otherwise = fieldsep : map fieldopt fields ++ [fieldsep] - fieldsep = Param "--" - fieldopt (field, value) = Param $ - fieldName field ++ "=" ++ value + where + dir = Git.repoPath r + shellcmd = "git-annex-shell" + shellopts = Param command : File dir : params + sshcmd uuid = unwords $ + shellcmd : map shellEscape (toCommand shellopts) ++ + uuidcheck uuid ++ + map shellEscape (toCommand fieldopts) + uuidcheck NoUUID = [] + uuidcheck (UUID u) = ["--uuid", u] + fieldopts + | null fields = [] + | otherwise = fieldsep : map fieldopt fields ++ [fieldsep] + fieldsep = Param "--" + fieldopt (field, value) = Param $ + fieldName field ++ "=" ++ value {- Uses a supplied function (such as boolSystem) to run a git-annex-shell - command on a remote. diff --git a/Remote/Hook.hs b/Remote/Hook.hs index 716a818..f9a143c 100644 --- a/Remote/Hook.hs +++ b/Remote/Hook.hs @@ -64,19 +64,18 @@ hookSetup u c = do hookEnv :: Key -> Maybe FilePath -> IO (Maybe [(String, String)]) hookEnv k f = Just <$> mergeenv (fileenv f ++ keyenv) - where - mergeenv l = M.toList . - M.union (M.fromList l) - <$> M.fromList <$> getEnvironment - env s v = ("ANNEX_" ++ s, v) - keyenv = - [ env "KEY" (key2file k) - , env "HASH_1" (hashbits !! 0) - , env "HASH_2" (hashbits !! 1) - ] - fileenv Nothing = [] - fileenv (Just file) = [env "FILE" file] - hashbits = map takeDirectory $ splitPath $ hashDirMixed k + where + mergeenv l = M.toList . M.union (M.fromList l) + <$> M.fromList <$> getEnvironment + env s v = ("ANNEX_" ++ s, v) + keyenv = catMaybes + [ Just $ env "KEY" (key2file k) + , env "HASH_1" <$> headMaybe hashbits + , env "HASH_2" <$> headMaybe (drop 1 hashbits) + ] + fileenv Nothing = [] + fileenv (Just file) = [env "FILE" file] + hashbits = map takeDirectory $ splitPath $ hashDirMixed k lookupHook :: String -> String -> Annex (Maybe String) lookupHook hooktype hook =do @@ -86,22 +85,20 @@ lookupHook hooktype hook =do warning $ "missing configuration for " ++ hookname return Nothing else return $ Just command - where - hookname = hooktype ++ "-" ++ hook ++ "-hook" + where + hookname = hooktype ++ "-" ++ hook ++ "-hook" runHook :: String -> String -> Key -> Maybe FilePath -> Annex Bool -> Annex Bool runHook hooktype hook k f a = maybe (return False) run =<< lookupHook hooktype hook - where - run command = do - showOutput -- make way for hook output - ifM (liftIO $ - boolSystemEnv "sh" [Param "-c", Param command] - =<< hookEnv k f) - ( a - , do - warning $ hook ++ " hook exited nonzero!" - return False - ) + where + run command = do + showOutput -- make way for hook output + ifM (liftIO $ boolSystemEnv "sh" [Param "-c", Param command] =<< hookEnv k f) + ( a + , do + warning $ hook ++ " hook exited nonzero!" 
+ return False
+ )
store :: String -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
store h k _f _p = do
@@ -134,9 +131,9 @@ checkPresent r h k = do
showAction $ "checking " ++ Git.repoDescribe r
v <- lookupHook h "checkpresent"
liftIO $ catchMsgIO $ check v
- where
- findkey s = key2file k `elem` lines s
- check Nothing = error "checkpresent hook misconfigured"
- check (Just hook) = do
- env <- hookEnv k Nothing
- findkey <$> readProcessEnv "sh" ["-c", hook] env
+ where
+ findkey s = key2file k `elem` lines s
+ check Nothing = error "checkpresent hook misconfigured"
+ check (Just hook) = do
+ env <- hookEnv k Nothing
+ findkey <$> readProcessEnv "sh" ["-c", hook] env
diff --git a/Remote/List.hs b/Remote/List.hs
index 234f310..ea1d61c 100644
--- a/Remote/List.hs
+++ b/Remote/List.hs
@@ -56,8 +56,8 @@ remoteList = do
Annex.changeState $ \s -> s { Annex.remotes = rs' }
return rs'
else return rs
- where
- process m t = enumerate t >>= mapM (remoteGen m t)
+ where
+ process m t = enumerate t >>= mapM (remoteGen m t)
{- Forces the remoteList to be re-generated, re-reading the git config. -}
remoteListRefresh :: Annex [Remote]
@@ -81,11 +81,11 @@ updateRemote remote = do
m <- readRemoteLog
remote' <- updaterepo $ repo remote
remoteGen m (remotetype remote) remote'
- where
- updaterepo r
- | Git.repoIsLocal r || Git.repoIsLocalUnknown r =
- Remote.Git.configRead r
- | otherwise = return r
+ where
+ updaterepo r
+ | Git.repoIsLocal r || Git.repoIsLocalUnknown r =
+ Remote.Git.configRead r
+ | otherwise = return r
{- All remotes that are not ignored. -}
enabledRemoteList :: Annex [Remote]
diff --git a/Remote/Rsync.hs b/Remote/Rsync.hs
index c3ef94a..1d5f2d2 100644
--- a/Remote/Rsync.hs
+++ b/Remote/Rsync.hs
@@ -72,14 +72,14 @@ genRsyncOpts r c = do
<$> getRemoteConfig r "rsync-options" ""
let escape = maybe True (\m -> M.lookup "shellescape" m /= Just "no") c
return $ RsyncOpts url opts escape
- where
- safe o
- -- Don't allow user to pass --delete to rsync;
- -- that could cause it to delete other keys
- -- in the same hash bucket as a key it sends.
- | o == "--delete" = False
- | o == "--delete-excluded" = False
- | otherwise = True
+ where
+ safe o
+ -- Don't allow user to pass --delete to rsync;
+ -- that could cause it to delete other keys
+ -- in the same hash bucket as a key it sends.
+ | o == "--delete" = False
+ | o == "--delete-excluded" = False
+ | otherwise = True
rsyncSetup :: UUID -> RemoteConfig -> Annex RemoteConfig
rsyncSetup u c = do
@@ -100,9 +100,9 @@ rsyncEscape o s
rsyncUrls :: RsyncOpts -> Key -> [String]
rsyncUrls o k = map use annexHashes
- where
- use h = rsyncUrl o </> h k </> rsyncEscape o (f </> f)
- f = keyFile k
+ where
+ use h = rsyncUrl o </> h k </> rsyncEscape o (f </> f)
+ f = keyFile k
store :: RsyncOpts -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
store o k _f p = rsyncSend o p k <=< inRepo $ gitAnnexLocation k
@@ -146,18 +146,18 @@ remove o k = withRsyncScratchDir $ \tmp -> liftIO $ do
, Param $ addTrailingPathSeparator dummy
, Param $ rsyncUrl o
]
- where
- {- Specify include rules to match the directories where the
- - content could be.
Note that the parent directories have
+ - to also be explicitly included, due to how rsync
+ - traverses directories. -}
+ includes = concatMap use annexHashes
+ use h = let dir = h k in
+ [ parentDir dir
+ , dir
+ -- match content directory and anything in it
+ , dir </> keyFile k </> "***"
+ ]
checkPresent :: Git.Repo -> RsyncOpts -> Key -> Annex (Either String Bool)
checkPresent r o k = do
@@ -165,10 +165,13 @@ checkPresent r o k = do
-- note: Does not currently differentiate between rsync failing
-- to connect, and the file not being present.
Right <$> check
- where
- check = untilTrue (rsyncUrls o k) $ \u ->
- liftIO $ boolSystem "sh" [Param "-c", Param (cmd u)]
- cmd u = "rsync --quiet " ++ shellEscape u ++ " 2>/dev/null"
+ where
+ check = untilTrue (rsyncUrls o k) $ \u ->
+ liftIO $ catchBoolIO $ do
+ withQuietOutput createProcessSuccess $
+ proc "rsync" $ toCommand $
+ rsyncOptions o ++ [Param u]
+ return True
{- Rsync params to enable resumes of sending files safely,
- ensure that files are only moved into place once complete
@@ -187,9 +190,9 @@ withRsyncScratchDir a = do
nuke tmp
liftIO $ createDirectoryIfMissing True tmp
nuke tmp `after` a tmp
- where
- nuke d = liftIO $ whenM (doesDirectoryExist d) $
- removeDirectoryRecursive d
+ where
+ nuke d = liftIO $ whenM (doesDirectoryExist d) $
+ removeDirectoryRecursive d
rsyncRemote :: RsyncOpts -> (Maybe MeterUpdate) -> [CommandParam] -> Annex Bool
rsyncRemote o callback params = do
@@ -200,9 +203,9 @@ rsyncRemote o callback params = do
showLongNote "rsync failed -- run git annex again to resume file transfer"
return False
)
- where
- defaultParams = [Params "--progress"]
- ps = rsyncOptions o ++ defaultParams ++ params
+ where
+ defaultParams = [Params "--progress"]
+ ps = rsyncOptions o ++ defaultParams ++ params
{- To send a single key is slightly tricky; need to build up a temporary
directory structure to pass to rsync so it can create the hash
diff --git a/Remote/S3.hs b/Remote/S3.hs
index c4da0b2..0c9d523 100644
--- a/Remote/S3.hs
+++ b/Remote/S3.hs
@@ -48,74 +48,71 @@ gen' r u c cst =
(storeEncrypted this)
(retrieveEncrypted this)
this
- where
- this = Remote {
- uuid = u,
- cost = cst,
- name = Git.repoDescribe r,
- storeKey = store this,
- retrieveKeyFile = retrieve this,
- retrieveKeyFileCheap = retrieveCheap this,
- removeKey = remove this,
- hasKey = checkPresent this,
- hasKeyCheap = False,
- whereisKey = Nothing,
- config = c,
- repo = r,
- localpath = Nothing,
- readonly = False,
- remotetype = remote
- }
+ where
+ this = Remote {
+ uuid = u,
+ cost = cst,
+ name = Git.repoDescribe r,
+ storeKey = store this,
+ retrieveKeyFile = retrieve this,
+ retrieveKeyFileCheap = retrieveCheap this,
+ removeKey = remove this,
+ hasKey = checkPresent this,
+ hasKeyCheap = False,
+ whereisKey = Nothing,
+ config = c,
+ repo = r,
+ localpath = Nothing,
+ readonly = False,
+ remotetype = remote
+ }
s3Setup :: UUID -> RemoteConfig -> Annex RemoteConfig
s3Setup u c = handlehost $ M.lookup "host" c
- where
- remotename = fromJust (M.lookup "name" c)
- defbucket = remotename ++ "-" ++ fromUUID u
- defaults = M.fromList
- [ ("datacenter", "US")
- , ("storageclass", "STANDARD")
- , ("host", defaultAmazonS3Host)
- , ("port", show defaultAmazonS3Port)
- , ("bucket", defbucket)
- ]
+ where
+ remotename = fromJust (M.lookup "name" c)
+ defbucket = remotename ++ "-" ++ fromUUID u
+ defaults = M.fromList
+ [ ("datacenter", "US")
+ , ("storageclass", "STANDARD")
+ , ("host", defaultAmazonS3Host)
+ , ("port", show defaultAmazonS3Port)
+ , ("bucket",
defbucket) + ] - handlehost Nothing = defaulthost - handlehost (Just h) - | ".archive.org" `isSuffixOf` map toLower h = archiveorg - | otherwise = defaulthost + handlehost Nothing = defaulthost + handlehost (Just h) + | ".archive.org" `isSuffixOf` map toLower h = archiveorg + | otherwise = defaulthost - use fullconfig = do - gitConfigSpecialRemote u fullconfig "s3" "true" - s3SetCreds fullconfig u + use fullconfig = do + gitConfigSpecialRemote u fullconfig "s3" "true" + s3SetCreds fullconfig u - defaulthost = do - c' <- encryptionSetup c - let fullconfig = c' `M.union` defaults - genBucket fullconfig u - use fullconfig + defaulthost = do + c' <- encryptionSetup c + let fullconfig = c' `M.union` defaults + genBucket fullconfig u + use fullconfig - archiveorg = do - showNote "Internet Archive mode" - maybe (error "specify bucket=") (const noop) $ - M.lookup "bucket" archiveconfig - use archiveconfig - where - archiveconfig = - -- hS3 does not pass through - -- x-archive-* headers - M.mapKeys (replace "x-archive-" "x-amz-") $ - -- encryption does not make sense here - M.insert "encryption" "none" $ - M.union c $ - -- special constraints on key names - M.insert "mungekeys" "ia" $ - -- bucket created only when files - -- are uploaded - M.insert "x-amz-auto-make-bucket" "1" $ - -- no default bucket name; should - -- be human-readable - M.delete "bucket" defaults + archiveorg = do + showNote "Internet Archive mode" + maybe (error "specify bucket=") (const noop) $ + M.lookup "bucket" archiveconfig + use archiveconfig + where + archiveconfig = + -- hS3 does not pass through x-archive-* headers + M.mapKeys (replace "x-archive-" "x-amz-") $ + -- encryption does not make sense here + M.insert "encryption" "none" $ + M.union c $ + -- special constraints on key names + M.insert "mungekeys" "ia" $ + -- bucket created only when files are uploaded + M.insert "x-amz-auto-make-bucket" "1" $ + -- no default bucket name; should be human-readable + M.delete "bucket" defaults store :: Remote -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool store r k _f _p = s3Action r False $ \(conn, bucket) -> do @@ -143,15 +140,15 @@ storeHelper (conn, bucket) r k file = do S3Object bucket (bucketFile r k) "" (("Content-Length", show size) : xheaders) content sendObject conn object - where - storageclass = - case fromJust $ M.lookup "storageclass" $ fromJust $ config r of - "REDUCED_REDUNDANCY" -> REDUCED_REDUNDANCY - _ -> STANDARD - getsize = fileSize <$> (liftIO $ getFileStatus file) - - xheaders = filter isxheader $ M.assocs $ fromJust $ config r - isxheader (h, _) = "x-amz-" `isPrefixOf` h + where + storageclass = + case fromJust $ M.lookup "storageclass" $ fromJust $ config r of + "REDUCED_REDUNDANCY" -> REDUCED_REDUNDANCY + _ -> STANDARD + getsize = fileSize <$> (liftIO $ getFileStatus file) + + xheaders = filter isxheader $ M.assocs $ fromJust $ config r + isxheader (h, _) = "x-amz-" `isPrefixOf` h retrieve :: Remote -> Key -> AssociatedFile -> FilePath -> Annex Bool retrieve r k _f d = s3Action r False $ \(conn, bucket) -> do @@ -188,8 +185,8 @@ checkPresent r k = s3Action r noconn $ \(conn, bucket) -> do Right _ -> return $ Right True Left (AWSError _ _) -> return $ Right False Left e -> return $ Left (s3Error e) - where - noconn = Left $ error "S3 not configured" + where + noconn = Left $ error "S3 not configured" s3Warning :: ReqError -> Annex Bool s3Warning e = do @@ -215,12 +212,12 @@ s3Action r noconn action = do bucketFile :: Remote -> Key -> FilePath bucketFile r = munge . 
key2file - where - munge s = case M.lookup "mungekeys" c of - Just "ia" -> iaMunge $ fileprefix ++ s - _ -> fileprefix ++ s - fileprefix = M.findWithDefault "" "fileprefix" c - c = fromJust $ config r + where + munge s = case M.lookup "mungekeys" c of + Just "ia" -> iaMunge $ fileprefix ++ s + _ -> fileprefix ++ s + fileprefix = M.findWithDefault "" "fileprefix" c + c = fromJust $ config r bucketKey :: Remote -> String -> Key -> S3Object bucketKey r bucket k = S3Object bucket (bucketFile r k) "" [] L.empty @@ -230,12 +227,12 @@ bucketKey r bucket k = S3Object bucket (bucketFile r k) "" [] L.empty - encoded. -} iaMunge :: String -> String iaMunge = (>>= munge) - where - munge c - | isAsciiUpper c || isAsciiLower c || isNumber c = [c] - | c `elem` "_-.\"" = [c] - | isSpace c = [] - | otherwise = "&" ++ show (ord c) ++ ";" + where + munge c + | isAsciiUpper c || isAsciiLower c || isNumber c = [c] + | c `elem` "_-.\"" = [c] + | isSpace c = [] + | otherwise = "&" ++ show (ord c) ++ ";" genBucket :: RemoteConfig -> UUID -> Annex () genBucket c u = do @@ -251,9 +248,9 @@ genBucket c u = do case res of Right _ -> noop Left err -> s3Error err - where - bucket = fromJust $ M.lookup "bucket" c - datacenter = fromJust $ M.lookup "datacenter" c + where + bucket = fromJust $ M.lookup "bucket" c + datacenter = fromJust $ M.lookup "datacenter" c s3ConnectionRequired :: RemoteConfig -> UUID -> Annex AWSConnection s3ConnectionRequired c u = @@ -267,46 +264,46 @@ s3Connection c u = do _ -> do warning $ "Set both " ++ s3AccessKey ++ " and " ++ s3SecretKey ++ " to use S3" return Nothing - where - host = fromJust $ M.lookup "host" c - port = let s = fromJust $ M.lookup "port" c in - case reads s of - [(p, _)] -> p - _ -> error $ "bad S3 port value: " ++ s + where + host = fromJust $ M.lookup "host" c + port = let s = fromJust $ M.lookup "port" c in + case reads s of + [(p, _)] -> p + _ -> error $ "bad S3 port value: " ++ s {- S3 creds come from the environment if set, otherwise from the cache - in gitAnnexCredsDir, or failing that, might be stored encrypted in - the remote's config. -} s3GetCreds :: RemoteConfig -> UUID -> Annex (Maybe (String, String)) s3GetCreds c u = maybe fromcache (return . Just) =<< liftIO getenv - where - getenv = liftM2 (,) - <$> get s3AccessKey - <*> get s3SecretKey - where - get = catchMaybeIO . getEnv - fromcache = do - d <- fromRepo gitAnnexCredsDir - let f = d fromUUID u - v <- liftIO $ catchMaybeIO $ readFile f - case lines <$> v of - Just (ak:sk:[]) -> return $ Just (ak, sk) - _ -> fromconfig - fromconfig = do - mcipher <- remoteCipher c - case (M.lookup "s3creds" c, mcipher) of - (Just s3creds, Just cipher) -> do - creds <- liftIO $ decrypt s3creds cipher - case creds of - [ak, sk] -> do - s3CacheCreds (ak, sk) u - return $ Just (ak, sk) - _ -> do error "bad s3creds" - _ -> return Nothing - decrypt s3creds cipher = lines <$> - withDecryptedContent cipher - (return $ L.pack $ fromB64 s3creds) - (return . L.unpack) + where + getenv = liftM2 (,) + <$> get s3AccessKey + <*> get s3SecretKey + where + get = catchMaybeIO . 
getEnv + fromcache = do + d <- fromRepo gitAnnexCredsDir + let f = d fromUUID u + v <- liftIO $ catchMaybeIO $ readFile f + case lines <$> v of + Just (ak:sk:[]) -> return $ Just (ak, sk) + _ -> fromconfig + fromconfig = do + mcipher <- remoteCipher c + case (M.lookup "s3creds" c, mcipher) of + (Just s3creds, Just cipher) -> do + creds <- liftIO $ decrypt s3creds cipher + case creds of + [ak, sk] -> do + s3CacheCreds (ak, sk) u + return $ Just (ak, sk) + _ -> do error "bad s3creds" + _ -> return Nothing + decrypt s3creds cipher = lines + <$> withDecryptedContent cipher + (return $ L.pack $ fromB64 s3creds) + (return . L.unpack) {- Stores S3 creds encrypted in the remote's config if possible to do so - securely, and otherwise locally in gitAnnexCredsDir. -} diff --git a/Remote/Web.hs b/Remote/Web.hs index 78f747a..d722374 100644 --- a/Remote/Web.hs +++ b/Remote/Web.hs @@ -55,13 +55,13 @@ gen r _ _ = downloadKey :: Key -> AssociatedFile -> FilePath -> Annex Bool downloadKey key _file dest = get =<< getUrls key - where - get [] = do - warning "no known url" - return False - get urls = do - showOutput -- make way for download progress bar - downloadUrl urls dest + where + get [] = do + warning "no known url" + return False + get urls = do + showOutput -- make way for download progress bar + downloadUrl urls dest downloadKeyCheap :: Key -> FilePath -> Annex Bool downloadKeyCheap _ _ = return False diff --git a/Seek.hs b/Seek.hs index aeaf26b..cd30986 100644 --- a/Seek.hs +++ b/Seek.hs @@ -35,21 +35,21 @@ withFilesNotInGit a params = do seekunless (null ps && not (null params)) ps dotfiles <- seekunless (null dotps) dotps prepFiltered a $ return $ preserveOrder params (files++dotfiles) - where - (dotps, ps) = partition dotfile params - seekunless True _ = return [] - seekunless _ l = do - force <- Annex.getState Annex.force - g <- gitRepo - liftIO $ Git.Command.leaveZombie <$> LsFiles.notInRepo force l g + where + (dotps, ps) = partition dotfile params + seekunless True _ = return [] + seekunless _ l = do + force <- Annex.getState Annex.force + g <- gitRepo + liftIO $ Git.Command.leaveZombie <$> LsFiles.notInRepo force l g withPathContents :: ((FilePath, FilePath) -> CommandStart) -> CommandSeek withPathContents a params = map a . concat <$> liftIO (mapM get params) - where - get p = ifM (isDirectory <$> getFileStatus p) - ( map (\f -> (f, makeRelative p f)) <$> dirContentsRecursive p - , return [(p, takeFileName p)] - ) + where + get p = ifM (isDirectory <$> getFileStatus p) + ( map (\f -> (f, makeRelative p f)) <$> dirContentsRecursive p + , return [(p, takeFileName p)] + ) withWords :: ([String] -> CommandStart) -> CommandSeek withWords a params = return [a params] @@ -59,10 +59,10 @@ withStrings a params = return $ map a params withPairs :: ((String, String) -> CommandStart) -> CommandSeek withPairs a params = return $ map a $ pairs [] params - where - pairs c [] = reverse c - pairs c (x:y:xs) = pairs ((x,y):c) xs - pairs _ _ = error "expected pairs" + where + pairs c [] = reverse c + pairs c (x:y:xs) = pairs ((x,y):c) xs + pairs _ _ = error "expected pairs" withFilesToBeCommitted :: (String -> CommandStart) -> CommandSeek withFilesToBeCommitted a params = prepFiltered a $ @@ -83,8 +83,8 @@ withFilesUnlocked' typechanged a params = do withKeys :: (Key -> CommandStart) -> CommandSeek withKeys a params = return $ map (a . 
parse) params - where - parse p = fromMaybe (error "bad key") $ file2key p + where + parse p = fromMaybe (error "bad key") $ file2key p withValue :: Annex v -> (v -> CommandSeek) -> CommandSeek withValue v a params = do @@ -111,10 +111,9 @@ prepFiltered :: (FilePath -> CommandStart) -> Annex [FilePath] -> Annex [Command prepFiltered a fs = do matcher <- Limit.getMatcher map (process matcher) <$> fs - where - process matcher f = do - ok <- matcher f - if ok then a f else return Nothing + where + process matcher f = ifM (matcher $ Annex.FileInfo f f) + ( a f , return Nothing ) notSymlink :: FilePath -> IO Bool notSymlink f = liftIO $ not . isSymbolicLink <$> getSymbolicLinkStatus f diff --git a/Setup.hs b/Setup.hs index 7e07093..7a1f6cc 100644 --- a/Setup.hs +++ b/Setup.hs @@ -30,16 +30,16 @@ myPostInst _ (InstallFlags { installVerbosity }) pkg lbi = do installGitAnnexShell dest verbosity pkg lbi installManpages dest verbosity pkg lbi installDesktopFile dest verbosity pkg lbi - where - dest = NoCopyDest - verbosity = fromFlag installVerbosity + where + dest = NoCopyDest + verbosity = fromFlag installVerbosity installGitAnnexShell :: CopyDest -> Verbosity -> PackageDescription -> LocalBuildInfo -> IO () installGitAnnexShell copyDest verbosity pkg lbi = rawSystemExit verbosity "ln" ["-sf", "git-annex", dstBinDir "git-annex-shell"] - where - dstBinDir = bindir $ absoluteInstallDirs pkg lbi copyDest + where + dstBinDir = bindir $ absoluteInstallDirs pkg lbi copyDest {- See http://www.haskell.org/haskellwiki/Cabal/Developer-FAQ#Installing_manpages - @@ -49,15 +49,15 @@ installGitAnnexShell copyDest verbosity pkg lbi = installManpages :: CopyDest -> Verbosity -> PackageDescription -> LocalBuildInfo -> IO () installManpages copyDest verbosity pkg lbi = installOrdinaryFiles verbosity dstManDir =<< srcManpages - where - dstManDir = mandir (absoluteInstallDirs pkg lbi copyDest) "man1" - srcManpages = zip (repeat srcManDir) - <$> filterM doesFileExist manpages - srcManDir = "" - manpages = ["git-annex.1", "git-annex-shell.1"] + where + dstManDir = mandir (absoluteInstallDirs pkg lbi copyDest) "man1" + srcManpages = zip (repeat srcManDir) + <$> filterM doesFileExist manpages + srcManDir = "" + manpages = ["git-annex.1", "git-annex-shell.1"] installDesktopFile :: CopyDest -> Verbosity -> PackageDescription -> LocalBuildInfo -> IO () installDesktopFile copyDest verbosity pkg lbi = InstallDesktopFile.install $ dstBinDir "git-annex" - where - dstBinDir = bindir $ absoluteInstallDirs pkg lbi copyDest + where + dstBinDir = bindir $ absoluteInstallDirs pkg lbi copyDest diff --git a/Types/Key.hs b/Types/Key.hs index 6794ee0..ecdf7b8 100644 --- a/Types/Key.hs +++ b/Types/Key.hs @@ -46,33 +46,33 @@ fieldSep = '-' key2file :: Key -> FilePath key2file Key { keyBackendName = b, keySize = s, keyMtime = m, keyName = n } = b +++ ('s' ?: s) +++ ('m' ?: m) +++ (fieldSep : n) - where - "" +++ y = y - x +++ "" = x - x +++ y = x ++ fieldSep:y - c ?: (Just v) = c : show v - _ ?: _ = "" + where + "" +++ y = y + x +++ "" = x + x +++ y = x ++ fieldSep:y + c ?: (Just v) = c : show v + _ ?: _ = "" file2key :: FilePath -> Maybe Key file2key s = if key == Just stubKey then Nothing else key - where - key = startbackend stubKey s + where + key = startbackend stubKey s - startbackend k v = sepfield k v addbackend + startbackend k v = sepfield k v addbackend - sepfield k v a = case span (/= fieldSep) v of - (v', _:r) -> findfields r $ a k v' - _ -> Nothing + sepfield k v a = case span (/= fieldSep) v of + (v', _:r) -> findfields r $ a 
k v' + _ -> Nothing - findfields (c:v) (Just k) - | c == fieldSep = Just $ k { keyName = v } - | otherwise = sepfield k v $ addfield c - findfields _ v = v + findfields (c:v) (Just k) + | c == fieldSep = Just $ k { keyName = v } + | otherwise = sepfield k v $ addfield c + findfields _ v = v - addbackend k v = Just k { keyBackendName = v } - addfield 's' k v = Just k { keySize = readish v } - addfield 'm' k v = Just k { keyMtime = readish v } - addfield _ _ _ = Nothing + addbackend k v = Just k { keyBackendName = v } + addfield 's' k v = Just k { keySize = readish v } + addfield 'm' k v = Just k { keyMtime = readish v } + addfield _ _ _ = Nothing prop_idempotent_key_encode :: Key -> Bool prop_idempotent_key_encode k = Just k == (file2key . key2file) k diff --git a/Types/UUID.hs b/Types/UUID.hs index 88c261b..8a304df 100644 --- a/Types/UUID.hs +++ b/Types/UUID.hs @@ -7,6 +7,8 @@ module Types.UUID where +import qualified Data.Map as M + -- A UUID is either an arbitrary opaque string, or UUID info may be missing. data UUID = NoUUID | UUID String deriving (Eq, Ord, Show, Read) @@ -18,3 +20,5 @@ fromUUID NoUUID = "" toUUID :: String -> UUID toUUID [] = NoUUID toUUID s = UUID s + +type UUIDMap = M.Map UUID String diff --git a/Upgrade.hs b/Upgrade.hs index 44ca632..705b190 100644 --- a/Upgrade.hs +++ b/Upgrade.hs @@ -15,8 +15,8 @@ import qualified Upgrade.V2 upgrade :: Annex Bool upgrade = go =<< getVersion - where - go (Just "0") = Upgrade.V0.upgrade - go (Just "1") = Upgrade.V1.upgrade - go (Just "2") = Upgrade.V2.upgrade - go _ = return True + where + go (Just "0") = Upgrade.V0.upgrade + go (Just "1") = Upgrade.V1.upgrade + go (Just "2") = Upgrade.V2.upgrade + go _ = return True diff --git a/Upgrade/V0.hs b/Upgrade/V0.hs index 8f3af33..00a08cb 100644 --- a/Upgrade/V0.hs +++ b/Upgrade/V0.hs @@ -40,10 +40,10 @@ getKeysPresent0 dir = ifM (liftIO $ doesDirectoryExist dir) <$> (filterM present =<< getDirectoryContents dir) , return [] ) - where - present d = do - result <- tryIO $ - getFileStatus $ dir ++ "/" ++ takeFileName d - case result of - Right s -> return $ isRegularFile s - Left _ -> return False + where + present d = do + result <- tryIO $ + getFileStatus $ dir ++ "/" ++ takeFileName d + case result of + Right s -> return $ isRegularFile s + Left _ -> return False diff --git a/Upgrade/V1.hs b/Upgrade/V1.hs index 8f7de39..966b51a 100644 --- a/Upgrade/V1.hs +++ b/Upgrade/V1.hs @@ -70,14 +70,14 @@ moveContent = do showAction "moving content" files <- getKeyFilesPresent1 forM_ files move - where - move f = do - let k = fileKey1 (takeFileName f) - let d = parentDir f - liftIO $ allowWrite d - liftIO $ allowWrite f - moveAnnex k f - liftIO $ removeDirectory d + where + move f = do + let k = fileKey1 (takeFileName f) + let d = parentDir f + liftIO $ allowWrite d + liftIO $ allowWrite f + moveAnnex k f + liftIO $ removeDirectory d updateSymlinks :: Annex () updateSymlinks = do @@ -86,54 +86,54 @@ updateSymlinks = do (files, cleanup) <- inRepo $ LsFiles.inRepo [top] forM_ files fixlink void $ liftIO cleanup - where - fixlink f = do - r <- lookupFile1 f - case r of - Nothing -> noop - Just (k, _) -> do - link <- calcGitLink f k - liftIO $ removeFile f - liftIO $ createSymbolicLink link f - Annex.Queue.addCommand "add" [Param "--"] [f] + where + fixlink f = do + r <- lookupFile1 f + case r of + Nothing -> noop + Just (k, _) -> do + link <- calcGitLink f k + liftIO $ removeFile f + liftIO $ createSymbolicLink link f + Annex.Queue.addCommand "add" [Param "--"] [f] moveLocationLogs :: Annex () 
moveLocationLogs = do showAction "moving location logs" logkeys <- oldlocationlogs forM_ logkeys move - where - oldlocationlogs = do - dir <- fromRepo Upgrade.V2.gitStateDir - ifM (liftIO $ doesDirectoryExist dir) - ( mapMaybe oldlog2key - <$> (liftIO $ getDirectoryContents dir) - , return [] - ) - move (l, k) = do - dest <- fromRepo $ logFile2 k - dir <- fromRepo Upgrade.V2.gitStateDir - let f = dir l - liftIO $ createDirectoryIfMissing True (parentDir dest) - -- could just git mv, but this way deals with - -- log files that are not checked into git, - -- as well as merging with already upgraded - -- logs that have been pulled from elsewhere - old <- liftIO $ readLog1 f - new <- liftIO $ readLog1 dest - liftIO $ writeLog1 dest (old++new) - Annex.Queue.addCommand "add" [Param "--"] [dest] - Annex.Queue.addCommand "add" [Param "--"] [f] - Annex.Queue.addCommand "rm" [Param "--quiet", Param "-f", Param "--"] [f] - + where + oldlocationlogs = do + dir <- fromRepo Upgrade.V2.gitStateDir + ifM (liftIO $ doesDirectoryExist dir) + ( mapMaybe oldlog2key + <$> (liftIO $ getDirectoryContents dir) + , return [] + ) + move (l, k) = do + dest <- fromRepo $ logFile2 k + dir <- fromRepo Upgrade.V2.gitStateDir + let f = dir l + liftIO $ createDirectoryIfMissing True (parentDir dest) + -- could just git mv, but this way deals with + -- log files that are not checked into git, + -- as well as merging with already upgraded + -- logs that have been pulled from elsewhere + old <- liftIO $ readLog1 f + new <- liftIO $ readLog1 dest + liftIO $ writeLog1 dest (old++new) + Annex.Queue.addCommand "add" [Param "--"] [dest] + Annex.Queue.addCommand "add" [Param "--"] [f] + Annex.Queue.addCommand "rm" [Param "--quiet", Param "-f", Param "--"] [f] + oldlog2key :: FilePath -> Maybe (FilePath, Key) oldlog2key l | drop len l == ".log" && sane = Just (l, k) | otherwise = Nothing - where - len = length l - 4 - k = readKey1 (take len l) - sane = (not . null $ keyName k) && (not . null $ keyBackendName k) + where + len = length l - 4 + k = readKey1 (take len l) + sane = (not . null $ keyName k) && (not . null $ keyBackendName k) -- WORM backend keys: "WORM:mtime:size:filename" -- all the rest: "backend:key" @@ -150,25 +150,25 @@ readKey1 v , keySize = s , keyMtime = t } - where - bits = split ":" v - b = Prelude.head bits - n = join ":" $ drop (if wormy then 3 else 1) bits - t = if wormy - then Just (Prelude.read (bits !! 1) :: EpochTime) - else Nothing - s = if wormy - then Just (Prelude.read (bits !! 2) :: Integer) - else Nothing - wormy = Prelude.head bits == "WORM" - mixup = wormy && isUpper (Prelude.head $ bits !! 1) + where + bits = split ":" v + b = Prelude.head bits + n = join ":" $ drop (if wormy then 3 else 1) bits + t = if wormy + then Just (Prelude.read (bits !! 1) :: EpochTime) + else Nothing + s = if wormy + then Just (Prelude.read (bits !! 2) :: Integer) + else Nothing + wormy = Prelude.head bits == "WORM" + mixup = wormy && isUpper (Prelude.head $ bits !! 1) showKey1 :: Key -> String showKey1 Key { keyName = n , keyBackendName = b, keySize = s, keyMtime = t } = join ":" $ filter (not . 
null) [b, showifhere t, showifhere s, n] - where - showifhere Nothing = "" - showifhere (Just v) = show v + where + showifhere Nothing = "" + showifhere (Just v) = show v keyFile1 :: Key -> FilePath keyFile1 key = replace "/" "%" $ replace "%" "&s" $ replace "&" "&a" $ showKey1 key @@ -190,21 +190,21 @@ lookupFile1 file = do case tl of Left _ -> return Nothing Right l -> makekey l - where - getsymlink = takeFileName <$> readSymbolicLink file - makekey l = case maybeLookupBackendName bname of - Nothing -> do - unless (null kname || null bname || - not (isLinkToAnnex l)) $ - warning skip - return Nothing - Just backend -> return $ Just (k, backend) - where - k = fileKey1 l - bname = keyBackendName k - kname = keyName k - skip = "skipping " ++ file ++ - " (unknown backend " ++ bname ++ ")" + where + getsymlink = takeFileName <$> readSymbolicLink file + makekey l = case maybeLookupBackendName bname of + Nothing -> do + unless (null kname || null bname || + not (isLinkToAnnex l)) $ + warning skip + return Nothing + Just backend -> return $ Just (k, backend) + where + k = fileKey1 l + bname = keyBackendName k + kname = keyName k + skip = "skipping " ++ file ++ + " (unknown backend " ++ bname ++ ")" getKeyFilesPresent1 :: Annex [FilePath] getKeyFilesPresent1 = getKeyFilesPresent1' =<< fromRepo gitAnnexObjectDir @@ -217,12 +217,12 @@ getKeyFilesPresent1' dir = liftIO $ filterM present files , return [] ) - where - present f = do - result <- tryIO $ getFileStatus f - case result of - Right s -> return $ isRegularFile s - Left _ -> return False + where + present f = do + result <- tryIO $ getFileStatus f + case result of + Right s -> return $ isRegularFile s + Left _ -> return False logFile1 :: Git.Repo -> Key -> String logFile1 repo key = Upgrade.V2.gitStateDir repo ++ keyFile1 key ++ ".log" diff --git a/Upgrade/V2.hs b/Upgrade/V2.hs index 1f4a40f..beddc5b 100644 --- a/Upgrade/V2.hs +++ b/Upgrade/V2.hs @@ -70,10 +70,10 @@ locationLogs = do levelb <- mapM tryDirContents levela files <- mapM tryDirContents (concat levelb) return $ mapMaybe islogfile (concat files) - where - tryDirContents d = catchDefaultIO [] $ dirContents d - islogfile f = maybe Nothing (\k -> Just (k, f)) $ - logFileKey $ takeFileName f + where + tryDirContents d = catchDefaultIO [] $ dirContents d + islogfile f = maybe Nothing (\k -> Just (k, f)) $ + logFileKey $ takeFileName f inject :: FilePath -> FilePath -> Annex () inject source dest = do diff --git a/Usage.hs b/Usage.hs index 66fb556..fc62bf5 100644 --- a/Usage.hs +++ b/Usage.hs @@ -23,30 +23,30 @@ usage header cmds commonoptions = unlines $ , "Commands:" , "" ] ++ cmdlines - where - -- To get consistent indentation of options, generate the - -- usage for all options at once. A command's options will - -- be displayed after the command. - alloptlines = filter (not . null) $ - lines $ usageInfo "" $ - concatMap cmdoptions scmds ++ commonoptions - (cmdlines, optlines) = go scmds alloptlines [] - go [] os ls = (ls, os) - go (c:cs) os ls = go cs os' (ls++(l:o)) - where - (o, os') = splitAt (length $ cmdoptions c) os - l = concat - [ cmdname c - , namepad (cmdname c) - , cmdparamdesc c - , descpad (cmdparamdesc c) - , cmddesc c - ] - pad n s = replicate (n - length s) ' ' - namepad = pad $ longest cmdname + 1 - descpad = pad $ longest cmdparamdesc + 2 - longest f = foldl max 0 $ map (length . f) cmds - scmds = sort cmds + where + -- To get consistent indentation of options, generate the + -- usage for all options at once. 
A command's options will + -- be displayed after the command. + alloptlines = filter (not . null) $ + lines $ usageInfo "" $ + concatMap cmdoptions scmds ++ commonoptions + (cmdlines, optlines) = go scmds alloptlines [] + go [] os ls = (ls, os) + go (c:cs) os ls = go cs os' (ls++(l:o)) + where + (o, os') = splitAt (length $ cmdoptions c) os + l = concat + [ cmdname c + , namepad (cmdname c) + , cmdparamdesc c + , descpad (cmdparamdesc c) + , cmddesc c + ] + pad n s = replicate (n - length s) ' ' + namepad = pad $ longest cmdname + 1 + descpad = pad $ longest cmdparamdesc + 2 + longest f = foldl max 0 $ map (length . f) cmds + scmds = sort cmds {- Descriptions of params used in usage messages. -} paramPaths :: String diff --git a/Utility/Applicative.o b/Utility/Applicative.o deleted file mode 100644 index de43a1a..0000000 Binary files a/Utility/Applicative.o and /dev/null differ diff --git a/Utility/CoProcess.o b/Utility/CoProcess.o deleted file mode 100644 index 4ddd510..0000000 Binary files a/Utility/CoProcess.o and /dev/null differ diff --git a/Utility/DBus.hs b/Utility/DBus.hs index cfd06f7..d31c20d 100644 --- a/Utility/DBus.hs +++ b/Utility/DBus.hs @@ -5,13 +5,17 @@ - Licensed under the GNU GPL version 3 or higher. -} -{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE OverloadedStrings, ScopedTypeVariables #-} module Utility.DBus where +import Utility.Exception + import DBus.Client import DBus import Data.Maybe +import Control.Concurrent +import Control.Exception as E type ServiceName = String @@ -26,3 +30,55 @@ callDBus client name params = call_ client $ { methodCallDestination = Just "org.freedesktop.DBus" , methodCallBody = params } + +{- Connects to the bus, and runs the client action. + - + - Throws a ClientError, and closes the connection if it fails to + - process an incoming message, or if the connection is lost. + - Unlike DBus's usual interface, this error is thrown at the top level, + - rather than inside the clientThreadRunner, so it can be caught, and + - runClient re-run as needed. -} +runClient :: IO (Maybe Address) -> (Client -> IO ()) -> IO () +runClient getaddr clientaction = do + env <- getaddr + case env of + Nothing -> throwIO (clientError "runClient: unable to determine DBUS address") + Just addr -> do + {- The clientaction will set up listeners, which + - run in a different thread. We block while + - they're running, until our threadrunner catches + - a ClientError, which it will put into the MVar + - to be rethrown here. -} + mv <- newEmptyMVar + let tr = threadrunner (putMVar mv) + let opts = defaultClientOptions { clientThreadRunner = tr } + client <- connectWith opts addr + clientaction client + e <- takeMVar mv + disconnect client + throw e + where + threadrunner storeerr io = loop + where + loop = catchClientError (io >> loop) storeerr + +{- Connects to the bus, and runs the client action. + - + - If the connection is lost, runs onretry, which can do something like + - a delay, or printing a warning, and has a state value (useful for + - exponential backoff). Once onretry returns, the connection is retried. + -} +persistentClient :: IO (Maybe Address) -> v -> (SomeException -> v -> IO v) -> (Client -> IO ()) -> IO () +persistentClient getaddr v onretry clientaction = + {- runClient can fail with not just ClientError, but also other + - things, if dbus is not running. Let async exceptions through. 
-} + runClient getaddr clientaction `catchNonAsync` retry + where + retry e = do + v' <- onretry e v + persistentClient getaddr v' onretry clientaction + +{- Catches only ClientError -} +catchClientError :: IO () -> (ClientError -> IO ()) -> IO () +catchClientError io handler = + either handler return =<< (E.try io :: IO (Either ClientError ())) diff --git a/Utility/Directory.o b/Utility/Directory.o deleted file mode 100644 index d380c08..0000000 Binary files a/Utility/Directory.o and /dev/null differ diff --git a/Utility/Exception.hs b/Utility/Exception.hs index 8b60777..45f2aec 100644 --- a/Utility/Exception.hs +++ b/Utility/Exception.hs @@ -1,10 +1,12 @@ -{- Simple IO exception handling +{- Simple IO exception handling (and some more) - - Copyright 2011-2012 Joey Hess - - Licensed under the GNU GPL version 3 or higher. -} +{-# LANGUAGE ScopedTypeVariables #-} + module Utility.Exception where import Prelude hiding (catch) @@ -34,3 +36,16 @@ catchIO = catch {- try specialized for IO errors only -} tryIO :: IO a -> IO (Either IOException a) tryIO = try + +{- Catches all exceptions except for async exceptions. + - This is often better to use than catching them all, so that + - ThreadKilled and UserInterrupt get through. + -} +catchNonAsync :: IO a -> (SomeException -> IO a) -> IO a +catchNonAsync a onerr = a `catches` + [ Handler (\ (e :: AsyncException) -> throw e) + , Handler (\ (e :: SomeException) -> onerr e) + ] + +tryNonAsync :: IO a -> IO (Either SomeException a) +tryNonAsync a = (Right <$> a) `catchNonAsync` (return . Left) diff --git a/Utility/Exception.o b/Utility/Exception.o deleted file mode 100644 index ce7d4ac..0000000 Binary files a/Utility/Exception.o and /dev/null differ diff --git a/Utility/FileMode.hs b/Utility/FileMode.hs index c742c69..7109c14 100644 --- a/Utility/FileMode.hs +++ b/Utility/FileMode.hs @@ -48,6 +48,9 @@ writeModes = [ownerWriteMode, groupWriteMode, otherWriteMode] readModes :: [FileMode] readModes = [ownerReadMode, groupReadMode, otherReadMode] +executeModes :: [FileMode] +executeModes = [ownerExecuteMode, groupExecuteMode, otherExecuteMode] + {- Removes the write bits from a file. -} preventWrite :: FilePath -> IO () preventWrite f = modifyFileMode f $ removeModes writeModes @@ -72,9 +75,7 @@ isSymLink = checkMode symbolicLinkMode {- Checks if a file has any executable bits set. -} isExecutable :: FileMode -> Bool -isExecutable mode = combineModes ebits `intersectFileModes` mode /= 0 - where - ebits = [ownerExecuteMode, groupExecuteMode, otherExecuteMode] +isExecutable mode = combineModes executeModes `intersectFileModes` mode /= 0 {- Runs an action without that pesky umask influencing it, unless the - passed FileMode is the standard one. 
-} diff --git a/Utility/FileMode.o b/Utility/FileMode.o deleted file mode 100644 index fb29fdd..0000000 Binary files a/Utility/FileMode.o and /dev/null differ diff --git a/Utility/FileSystemEncoding.o b/Utility/FileSystemEncoding.o deleted file mode 100644 index afbf066..0000000 Binary files a/Utility/FileSystemEncoding.o and /dev/null differ diff --git a/Utility/FreeDesktop.hs b/Utility/FreeDesktop.hs index 0845f33..7aba1f2 100644 --- a/Utility/FreeDesktop.hs +++ b/Utility/FreeDesktop.hs @@ -25,6 +25,7 @@ module Utility.FreeDesktop ( import Utility.Exception import Utility.Path +import Utility.UserInfo import Utility.Process import Utility.PartialPrelude diff --git a/Utility/Misc.hs b/Utility/Misc.hs index f035040..7c81f56 100644 --- a/Utility/Misc.hs +++ b/Utility/Misc.hs @@ -12,6 +12,9 @@ import Control.Monad import Foreign import Data.Char import Control.Applicative +import System.Posix.Process (getAnyProcessStatus) + +import Utility.Exception {- A version of hgetContents that is not lazy. Ensures file is - all read before it gets closed. -} @@ -96,3 +99,14 @@ hGetSomeString h sz = do where peekbytes :: Int -> Ptr Word8 -> IO [Word8] peekbytes len buf = mapM (peekElemOff buf) [0..pred len] + +{- Reaps any zombie git processes. + - + - Warning: Not thread safe. Anything that was expecting to wait + - on a process and get back an exit status is going to be confused + - if this reap gets there first. -} +reapZombies :: IO () +reapZombies = do + -- throws an exception when there are no child processes + catchDefaultIO Nothing (getAnyProcessStatus False True) + >>= maybe (return ()) (const reapZombies) diff --git a/Utility/Misc.o b/Utility/Misc.o deleted file mode 100644 index 549d96f..0000000 Binary files a/Utility/Misc.o and /dev/null differ diff --git a/Utility/Monad.hs b/Utility/Monad.hs index 6abd7ee..4c3c304 100644 --- a/Utility/Monad.hs +++ b/Utility/Monad.hs @@ -45,6 +45,10 @@ ma <||> mb = ifM ma ( return True , mb ) (<&&>) :: Monad m => m Bool -> m Bool -> m Bool ma <&&> mb = ifM ma ( mb , return False ) +{- Same fixity as && and || -} +infixr 3 <&&> +infixr 2 <||> + {- Runs an action, passing its value to an observer before returning it. -} observe :: Monad m => (a -> m b) -> m a -> m a observe observer a = do diff --git a/Utility/Monad.o b/Utility/Monad.o deleted file mode 100644 index 9aa4600..0000000 Binary files a/Utility/Monad.o and /dev/null differ diff --git a/Utility/NotificationBroadcaster.hs b/Utility/NotificationBroadcaster.hs index accc35f..4bbbc54 100644 --- a/Utility/NotificationBroadcaster.hs +++ b/Utility/NotificationBroadcaster.hs @@ -26,10 +26,10 @@ module Utility.NotificationBroadcaster ( import Common import Control.Concurrent.STM -import Control.Concurrent.SampleVar +import Control.Concurrent.MSampleVar -{- One SampleVar per client. The TMVar is never empty, so never blocks. -} -type NotificationBroadcaster = TMVar [SampleVar ()] +{- One MSampleVar per client. The TMVar is never empty, so never blocks. 
-} +type NotificationBroadcaster = TMVar [MSampleVar ()] newtype NotificationId = NotificationId Int deriving (Read, Show, Eq, Ord) @@ -47,7 +47,7 @@ newNotificationHandle b = NotificationHandle <*> addclient where addclient = do - s <- newEmptySampleVar + s <- newEmptySV atomically $ do l <- takeTMVar b putTMVar b $ l ++ [s] @@ -67,11 +67,11 @@ sendNotification b = do l <- atomically $ readTMVar b mapM_ notify l where - notify s = writeSampleVar s () + notify s = writeSV s () {- Used by a client to block until a new notification is available since - the last time it tried. -} waitNotification :: NotificationHandle -> IO () waitNotification (NotificationHandle b (NotificationId i)) = do l <- atomically $ readTMVar b - readSampleVar (l !! i) + readSV (l !! i) diff --git a/Utility/OSX.hs b/Utility/OSX.hs index e5c0b62..cf4a6e8 100644 --- a/Utility/OSX.hs +++ b/Utility/OSX.hs @@ -8,6 +8,7 @@ module Utility.OSX where import Utility.Path +import Utility.UserInfo import System.FilePath diff --git a/Utility/PartialPrelude.o b/Utility/PartialPrelude.o deleted file mode 100644 index 988db9b..0000000 Binary files a/Utility/PartialPrelude.o and /dev/null differ diff --git a/Utility/Path.hs b/Utility/Path.hs index 209ff1b..f4c2843 100644 --- a/Utility/Path.hs +++ b/Utility/Path.hs @@ -14,9 +14,9 @@ import System.Directory import Data.List import Data.Maybe import Control.Applicative -import System.Posix.User import Utility.Monad +import Utility.UserInfo {- Returns the parent directory of a path. Parent of / is "" -} parentDir :: FilePath -> FilePath @@ -128,10 +128,6 @@ preserveOrder (l:ls) new = found ++ preserveOrder ls rest runPreserveOrder :: ([FilePath] -> IO [FilePath]) -> [FilePath] -> IO [FilePath] runPreserveOrder a files = preserveOrder files <$> a files -{- Current user's home directory. -} -myHomeDir :: IO FilePath -myHomeDir = homeDirectory <$> (getUserEntryForID =<< getEffectiveUserID) - {- Converts paths in the home directory to use ~/ -} relHome :: FilePath -> IO String relHome path = do diff --git a/Utility/Path.o b/Utility/Path.o deleted file mode 100644 index 8e6b2d2..0000000 Binary files a/Utility/Path.o and /dev/null differ diff --git a/Utility/Process.hs b/Utility/Process.hs index 613dd8b..68d5452 100644 --- a/Utility/Process.hs +++ b/Utility/Process.hs @@ -23,6 +23,7 @@ module Utility.Process ( createBackgroundProcess, withHandle, withBothHandles, + withQuietOutput, createProcess, runInteractiveProcess, stdinHandle, @@ -66,8 +67,6 @@ readProcessEnv cmd args environ = {- Writes a string to a process on its stdout, - returns its output, and also allows specifying the environment. - - - - -} writeReadProcessEnv :: FilePath @@ -185,6 +184,19 @@ withBothHandles creator p a = creator p' $ a . bothHandles , std_err = Inherit } +{- Forces the CreateProcessRunner to run quietly; + - both stdout and stderr are discarded. -} +withQuietOutput + :: CreateProcessRunner + -> CreateProcess + -> IO () +withQuietOutput creator p = withFile "/dev/null" WriteMode $ \devnull -> do + let p' = p + { std_out = UseHandle devnull + , std_err = UseHandle devnull + } + creator p' $ const $ return () + {- Extract a desired handle from createProcess's tuple. - These partial functions are safe as long as createProcess is run - with appropriate parameters to set up the desired handle. 
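
The withQuietOutput helper added to Utility/Process.hs above is what the rsync presence check in Remote/Rsync.hs earlier in this patch uses to discard a command's stdout and stderr instead of wrapping it in a shell "2>/dev/null" redirect. A minimal caller sketch following that same pattern; the sshReachable name and the ssh probe itself are illustrative and not part of the patch:

	import System.Process (proc)

	import Utility.Process
	import Utility.Exception (catchBoolIO)

	-- Illustrative sketch: probe whether a host accepts ssh connections,
	-- discarding all output. createProcessSuccess throws on nonzero exit,
	-- which catchBoolIO maps to False.
	sshReachable :: String -> IO Bool
	sshReachable host = catchBoolIO $ do
		withQuietOutput createProcessSuccess $
			proc "ssh" [host, "true"]
		return True
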
diff --git a/Utility/Process.o b/Utility/Process.o deleted file mode 100644 index 6081a3d..0000000 Binary files a/Utility/Process.o and /dev/null differ diff --git a/Utility/Rsync.hs b/Utility/Rsync.hs index f8e19eb..68d2755 100644 --- a/Utility/Rsync.hs +++ b/Utility/Rsync.hs @@ -53,8 +53,13 @@ rsync = boolSystem "rsync" - The params must enable rsync's --progress mode for this to work. -} rsyncProgress :: (Integer -> IO ()) -> [CommandParam] -> IO Bool -rsyncProgress callback params = - withHandle StdoutHandle createProcessSuccess p (feedprogress 0 []) +rsyncProgress callback params = do + r <- withHandle StdoutHandle createProcessSuccess p (feedprogress 0 []) + {- For an unknown reason, piping rsync's output like this does + - causes it to run a second ssh process, which it neglects to wait + - on. Reap the resulting zombie. -} + reapZombies + return r where p = proc "rsync" (toCommand params) feedprogress prev buf h = do diff --git a/Utility/SRV.hs b/Utility/SRV.hs new file mode 100644 index 0000000..5443b8b --- /dev/null +++ b/Utility/SRV.hs @@ -0,0 +1,112 @@ +{- SRV record lookup + - + - Uses either the ADNS Haskell library, or the standalone Haskell DNS + - package, or the host command. + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +{-# LANGUAGE CPP #-} + +module Utility.SRV ( + mkSRVTcp, + mkSRV, + lookupSRV, +) where + +import Utility.Process +import Utility.Exception +import Utility.PartialPrelude + +import Network +import Data.Function +import Data.List +import Control.Applicative +import Data.Maybe + +#ifdef WITH_ADNS +import ADNS.Resolver +import Data.Either +#else +#ifndef WITH_HOST +#ifdef WITH_DNS +import qualified Network.DNS.Lookup as DNS +import Network.DNS.Resolver +import qualified Data.ByteString.UTF8 as B8 +#endif +#endif +#endif + +newtype SRV = SRV String + deriving (Show, Eq) + +type HostPort = (HostName, PortID) + +type PriorityWeight = (Int, Int) -- sort by priority first, then weight + +mkSRV :: String -> String -> HostName -> SRV +mkSRV transport protocol host = SRV $ concat + ["_", protocol, "._", transport, ".", host] + +mkSRVTcp :: String -> HostName -> SRV +mkSRVTcp = mkSRV "tcp" + +{- Returns an ordered list, with highest priority hosts first. + - + - On error, returns an empty list. -} +lookupSRV :: SRV -> IO [HostPort] +#ifdef WITH_ADNS +lookupSRV (SRV srv) = initResolver [] $ \resolver -> do + r <- catchDefaultIO (Right []) $ + resolveSRV resolver srv + return $ either (\_ -> []) id r +#else +#ifdef WITH_HOST +lookupSRV = lookupSRVHost +#else +#ifdef WITH_DNS +lookupSRV (SRV srv) = do + seed <- makeResolvSeed defaultResolvConf + print srv + r <- withResolver seed $ flip DNS.lookupSRV $ B8.fromString srv + print r + return $ maybe [] (orderHosts . map tohosts) r + where + tohosts (priority, weight, port, hostname) = + ( (priority, weight) + , (B8.toString hostname, PortNumber $ fromIntegral port) + ) +#else +lookupSRV = lookupSRVHost +#endif +#endif +#endif + +lookupSRVHost :: SRV -> IO [HostPort] +lookupSRVHost (SRV srv) = catchDefaultIO [] $ + parseSrvHost <$> readProcessEnv "host" ["-t", "SRV", "--", srv] + -- clear environment, to avoid LANG affecting output + (Just []) + +parseSrvHost :: String -> [HostPort] +parseSrvHost = orderHosts . catMaybes . map parse . 
lines + where + parse l = case words l of + [_, _, _, _, spriority, sweight, sport, hostname] -> do + let v = + ( readish sport :: Maybe Int + , readish spriority :: Maybe Int + , readish sweight :: Maybe Int + ) + case v of + (Just port, Just priority, Just weight) -> Just + ( (priority, weight) + , (hostname, PortNumber $ fromIntegral port) + ) + _ -> Nothing + _ -> Nothing + +orderHosts :: [(PriorityWeight, HostPort)] -> [HostPort] +orderHosts = map snd . sortBy (compare `on` fst) diff --git a/Utility/SafeCommand.o b/Utility/SafeCommand.o deleted file mode 100644 index f38d6a7..0000000 Binary files a/Utility/SafeCommand.o and /dev/null differ diff --git a/Utility/State.hs b/Utility/State.hs index c27f3c2..7f89190 100644 --- a/Utility/State.hs +++ b/Utility/State.hs @@ -5,9 +5,11 @@ - Licensed under the GNU GPL version 3 or higher. -} +{-# LANGUAGE PackageImports #-} + module Utility.State where -import Control.Monad.State.Strict +import "mtl" Control.Monad.State.Strict {- Modifies Control.Monad.State's state, forcing a strict update. - This avoids building thunks in the state and leaking. diff --git a/Utility/TempFile.o b/Utility/TempFile.o deleted file mode 100644 index f87a546..0000000 Binary files a/Utility/TempFile.o and /dev/null differ diff --git a/Utility/ThreadScheduler.hs b/Utility/ThreadScheduler.hs index 6557398..5e165c9 100644 --- a/Utility/ThreadScheduler.hs +++ b/Utility/ThreadScheduler.hs @@ -11,6 +11,8 @@ module Utility.ThreadScheduler where import Common import Control.Concurrent +import Control.Exception +import Control.Concurrent.Async import System.Posix.Terminal import System.Posix.Signals @@ -44,6 +46,19 @@ unboundDelay time = do threadDelay $ fromInteger maxWait when (maxWait /= time) $ unboundDelay (time - maxWait) +{- Runs an action until a timeout is reached. If it fails to complete in + - time, or throws an exception, returns a Left value. + - + - Note that if the action runs an unsafe foreign call, the signal to + - cancel it may not arrive until the call returns. -} +runTimeout :: Seconds -> IO a -> IO (Either SomeException a) +runTimeout secs a = do + runner <- async a + controller <- async $ do + threadDelaySeconds secs + cancel runner + cancel controller `after` waitCatch runner + {- Pauses the main thread, letting children run until program termination. -} waitForTermination :: IO () waitForTermination = do diff --git a/Utility/Url.hs b/Utility/Url.hs index 6a45c55..e47cb9d 100644 --- a/Utility/Url.hs +++ b/Utility/Url.hs @@ -37,10 +37,15 @@ check url headers expected_size = handle <$> exists url headers {- Checks that an url exists and could be successfully downloaded, - also returning its size if available. -} exists :: URLString -> Headers -> IO (Bool, Maybe Integer) -exists url headers = - case parseURI url of - Nothing -> return (False, Nothing) - Just u -> do +exists url headers = case parseURI url of + Nothing -> return (False, Nothing) + Just u + | uriScheme u == "file:" -> do + s <- catchMaybeIO $ getFileStatus (uriPath u) + return $ case s of + Nothing -> (False, Nothing) + Just stat -> (True, Just $ fromIntegral $ fileSize stat) + | otherwise -> do r <- request u headers HEAD case rspCode r of (2,_,_) -> return (True, size r) @@ -54,9 +59,13 @@ exists url headers = - so is preferred.) Which program to use is determined at run time; it - would not be appropriate to test at configure time and build support - for only one in. + - + - Curl is always used for file:// urls, as wget does not support them. 
-} download :: URLString -> Headers -> [CommandParam] -> FilePath -> IO Bool -download url headers options file = ifM (inPath "wget") (wget , curl) +download url headers options file + | "file://" `isPrefixOf` url = curl + | otherwise = ifM (inPath "wget") (wget , curl) where headerparams = map (\h -> Param $ "--header=" ++ h) headers wget = go "wget" $ headerparams ++ [Params "-c -O"] @@ -110,10 +119,15 @@ request url headers requesttype = go 5 url Nothing -> return rsp Just newURI -> go n newURI_abs where -#ifdef URI_24 - newURI_abs = newURI `relativeTo` u -#else +#if defined VERSION_network +#if ! MIN_VERSION_network(2,4,0) +#define WITH_OLD_URI +#endif +#endif +#ifdef WITH_OLD_URI newURI_abs = fromMaybe newURI (newURI `relativeTo` u) +#else + newURI_abs = newURI `relativeTo` u #endif addheaders req = setHeaders req (rqHeaders req ++ userheaders) userheaders = rights $ map parseHeader headers diff --git a/Utility/UserInfo.hs b/Utility/UserInfo.hs new file mode 100644 index 0000000..6e75754 --- /dev/null +++ b/Utility/UserInfo.hs @@ -0,0 +1,32 @@ +{- user info + - + - Copyright 2012 Joey Hess + - + - Licensed under the GNU GPL version 3 or higher. + -} + +module Utility.UserInfo ( + myHomeDir, + myUserName +) where + +import Control.Applicative +import System.Posix.User +import System.Posix.Env + +{- Current user's home directory. + - + - getpwent will fail on LDAP or NIS, so use HOME if set. -} +myHomeDir :: IO FilePath +myHomeDir = myVal ["HOME"] homeDirectory + +{- Current user's user name. -} +myUserName :: IO String +myUserName = myVal ["USER", "LOGNAME"] userName + +myVal :: [String] -> (UserEntry -> String) -> IO String +myVal envvars extract = maybe (extract <$> getpwent) return =<< check envvars + where + check [] = return Nothing + check (v:vs) = maybe (check vs) (return . Just) =<< getEnv v + getpwent = getUserEntryForID =<< getEffectiveUserID diff --git a/Utility/WebApp.hs b/Utility/WebApp.hs index e11b3f4..cbc4ce9 100644 --- a/Utility/WebApp.hs +++ b/Utility/WebApp.hs @@ -86,10 +86,9 @@ localSocket = do go' :: Int -> AddrInfo -> IO Socket go' 0 _ = error "unable to bind to local socket" go' n addr = do - r <- tryIO $ bracketOnError (open addr) close (use addr) + r <- tryIO $ bracketOnError (open addr) sClose (use addr) either (const $ go' (pred n) addr) return r open addr = socket (addrFamily addr) (addrSocketType addr) (addrProtocol addr) - close = sClose use addr sock = do setSocketOption sock ReuseAddr 1 bindSocket sock (addrAddress addr) diff --git a/debian/changelog b/debian/changelog index 0e01ba3..c05d529 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,54 @@ +git-annex (3.20121112) unstable; urgency=low + + * assistant: Can use XMPP to notify other nodes about pushes made to other + repositories, as well as pushing to them directly over XMPP. + * wepapp: Added an XMPP configuration interface. + * webapp: Supports pairing over XMPP, with both friends, and other repos + using the same account. + * assistant: Drops non-preferred content when possible. + * assistant: Notices, and applies config changes as they are made to + the git-annex branch, including config changes pushed in from remotes. + * git-annex-shell: GIT_ANNEX_SHELL_DIRECTORY can be set to limit it + to operating on a specified directory. + * webapp: When setting up authorized_keys, use GIT_ANNEX_SHELL_DIRECTORY. + * Preferred content path matching bugfix. + * Preferred content expressions cannot use "in=". + * Preferred content expressions can use "present". 
+ * Fix handling of GIT_DIR when it refers to a git submodule. + * Depend on and use the Haskell SafeSemaphore library, which provides + exception-safe versions of SampleVar and QSemN. + Thanks, Ben Gamari for an excellent patch set. + * file:/// URLs can now be used with the web special remote. + * webapp: Allow dashes in ssh key comments when pairing. + * uninit: Check and abort if there are symlinks to annexed content that + are not checked into git. + * webapp: Switched to using the same multicast IP address that avahi uses. + * bup: Don't pass - to bup-split to make it read stdin; bup 0.25 + does not accept that. + * bugfix: Don't fail transferring content from read-only repos. + Closes: #691341 + * configure: Check that checksum programs produce correct checksums. + * Re-enable dbus, using a new version of the library that fixes the memory + leak. + * NetWatcher: When dbus connection is lost, try to reconnect. + * Use USER and HOME environment when set, and only fall back to getpwent, + which doesn't work with LDAP or NIS. + * rsync special remote: Include annex-rsync-options when running rsync + to test a key's presence. + * The standalone tarball's runshell now takes care of installing a + ~/.ssh/git-annex-shell wrapper the first time it's run. + * webapp: Make an initial, empty commit so there is a master branch + * assistant: Fix syncing local drives. + * webapp: Fix creation of rsync.net repositories. + * webapp: Fix renaming of special remotes. + * webapp: Generate better git remote names. + * webapp: Ensure that rsync special remotes are enabled using the same + name they were originally created using. + * Bugfix: Fix hang in webapp when setting up a ssh remote with an absolute + path. + + -- Joey Hess Mon, 12 Nov 2012 10:39:47 -0400 + git-annex (3.20121017) unstable; urgency=low * Fix zombie cleanup reversion introduced in 3.20121009. 
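
The "Use USER and HOME environment when set, and only fall back to getpwent" entry above is implemented by the new Utility/UserInfo.hs module earlier in this patch; Utility.Path, Utility.OSX, and Utility.FreeDesktop now import it rather than calling System.Posix.User directly. A minimal usage sketch, assuming only the two functions the module exports; the surrounding program is illustrative:

	import Utility.UserInfo (myHomeDir, myUserName)

	-- Both functions prefer the environment (HOME, or USER/LOGNAME) and
	-- only fall back to getpwent, which does not work with LDAP or NIS.
	main :: IO ()
	main = do
		home <- myHomeDir
		user <- myUserName
		putStrLn $ user ++ "'s home directory is " ++ home
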
diff --git a/debian/control b/debian/control index 505ea6e..00ab576 100644 --- a/debian/control +++ b/debian/control @@ -22,7 +22,7 @@ Build-Depends: libghc-edit-distance-dev, libghc-hinotify-dev [linux-any], libghc-stm-dev (>= 2.3), - libghc-dbus-dev [linux-any], + libghc-dbus-dev (>= 0.10.3) [linux-any], libghc-yesod-dev [i386 amd64 kfreebsd-i386 kfreebsd-amd64], libghc-yesod-static-dev [i386 amd64 kfreebsd-i386 kfreebsd-amd64], libghc-yesod-default-dev [i386 amd64 kfreebsd-i386 kfreebsd-amd64], @@ -35,10 +35,14 @@ Build-Depends: libghc-wai-logger-dev, libghc-warp-dev, libghc-blaze-builder-dev, - libghc-blaze-html-dev, libghc-crypto-api-dev, libghc-network-multicast-dev, libghc-network-info-dev, + libghc-safesemaphore-dev, + libghc-network-protocol-xmpp-dev (>= 0.4.3-2), + libghc-gnutls-dev (>= 0.1.4), + libghc-xml-types-dev, + libghc-async-dev, ikiwiki, perlmagick, git, @@ -59,7 +63,7 @@ Depends: ${misc:Depends}, ${shlibs:Depends}, rsync, wget | curl, openssh-client (>= 1:5.6p1) -Recommends: lsof, gnupg +Recommends: lsof, gnupg, bind9-host Suggests: graphviz, bup, libnss-mdns Description: manage files with git, without checking their contents into git git-annex allows managing files with git, without checking the file diff --git a/debian/rules b/debian/rules index c0fbd9a..090e739 100755 --- a/debian/rules +++ b/debian/rules @@ -2,9 +2,9 @@ ARCH = $(shell dpkg-architecture -qDEB_BUILD_ARCH) ifeq (install ok installed,$(shell dpkg-query -W -f '$${Status}' libghc-yesod-dev 2>/dev/null)) -export FEATURES=-DWITH_ASSISTANT -DWITH_S3 -DWITH_OLD_YESOD -DWITH_WEBAPP -DWITH_PAIRING +export FEATURES=-DWITH_ASSISTANT -DWITH_S3 -DWITH_OLD_URI -DWITH_OLD_YESOD -DWITH_WEBAPP -DWITH_PAIRING -DWITH_XMPP else -export FEATURES=-DWITH_ASSISTANT -DWITH_S3 -DWITH_PAIRING +export FEATURES=-DWITH_ASSISTANT -DWITH_S3 -DWITH_OLD_URI -DWITH_PAIRING -DWITH_XMPP endif %: diff --git a/doc/assistant.mdwn b/doc/assistant.mdwn index a38aa09..c451d45 100644 --- a/doc/assistant.mdwn +++ b/doc/assistant.mdwn @@ -3,14 +3,14 @@ removable drives, and cloud services, which it keeps synchronised, so its contents are the same everywhere. It's very easy to use, and has all the power of git and git-annex. -Note that the git-annex assistant is still beta quality code. See -[[the_errata|errata]] for known infelicities. - ## installation The git-annex assistant comes as part of git-annex, starting with version 3.20120924. See [[install]] to get it installed. +Note that the git-annex assistant is still beta quality code. See +the [[release_notes]] for known infelicities and upgrade instructions. + ## quick start To get started with the git-annex assistant, just pick it from @@ -32,7 +32,9 @@ interface to add repositories and control the git-annex assistant. ## documentation * Want to make two nearby computers share the same synchronised folder? - Follow the [[pairing_walkthrough]]. + Follow the [[local_pairing_walkthrough]]. +* Want to share a synchronised folder with a friend? + Follow the [[share_with_a_friend_walkthrough]]. ## command line startup diff --git a/doc/assistant/buddylist.png b/doc/assistant/buddylist.png new file mode 100644 index 0000000..40b5a92 Binary files /dev/null and b/doc/assistant/buddylist.png differ diff --git a/doc/assistant/errata.mdwn b/doc/assistant/errata.mdwn deleted file mode 100644 index 03d0d47..0000000 --- a/doc/assistant/errata.mdwn +++ /dev/null @@ -1,70 +0,0 @@ -## version 3.20121009 - -This is a maintenance release of the git-annex assistant, which is still in -beta. 
- -In general, anything you can configure with the assistant's web app -will work. Some examples of use cases supported by this release include: - -* [[Pairing|pairing_walkthrough]] two computers that are on the same local - network (or VPN) and automatically keeping the files in the annex in - sync as changes are made to them. -* Cloning your repository to removable drives, USB keys, etc. The assistant - will notice when the drive is mounted and keep it in sync. - Such a drive can be stored as an offline backup, or transported between - computers to keep them in sync. -* Cloning your repository to a remote server, running ssh, and uploading - changes made to your files to the server. There is special support - for using the rsync.net cloud provider this way, or any shell account - on a typical unix server, such as a Linode VPS can be used. - -The following are known limitations of this release of the git-annex -assistant: - -* On Mac OSX and BSD operating systems, the assistant uses kqueue to watch - files. Kqueue has to open every directory it watches, so too many - directories will run it out of the max number of open files (typically - 1024), and fail. See [[bugs/Issue_on_OSX_with_some_system_limits]] - for a workaround. -* In order to ensure that all multiple repositories are kept in sync, - each computer with a repository must be running the git-annex assistant. -* The assistant does not yet always manage to keep repositories in sync - when some are hidden from others behind firewalls. - -## version 3.20120924 - -This is the first beta release of the git-annex assistant. - -In general, anything you can configure with the assistant's web app -will work. Some examples of use cases supported by this release include: - -* [[Pairing|pairing_walkthrough]] two computers that are on the same local - network (or VPN) and automatically keeping the files in the annex in - sync as changes are made to them. -* Cloning your repository to removable drives, USB keys, etc. The assistant - will notice when the drive is mounted and keep it in sync. - Such a drive can be stored as an offline backup, or transported between - computers to keep them in sync. -* Cloning your repository to a remote server, running ssh, and uploading - changes made to your files to the server. There is special support - for using the rsync.net cloud provider this way, or any shell account - on a typical unix server, such as a Linode VPS can be used. - -The following are known limitations of this release of the git-annex -assistant: - -* On Mac OSX and BSD operating systems, the assistant uses kqueue to watch - files. Kqueue has to open every directory it watches, so too many - directories will run it out of the max number of open files (typically - 1024), and fail. See [[bugs/Issue_on_OSX_with_some_system_limits]] - for a workaround. -* In order to ensure that all multiple repositories are kept in sync, - each computer with a repository must be running the git-annex assistant. -* The assistant does not yet always manage to keep repositories in sync - when some are hidden from others behind firewalls. -* If a file is checked into git as a normal file and gets modified - (or merged, etc), it will be converted into an annexed file. So you - should not mix use of the assistant with normal git files in the same - repository yet. -* If you `git annex unlock` a file, it will immediately be re-locked. - See [[bugs/watcher_commits_unlocked_files]]. 
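
The bind9-host package newly Recommended in debian/control above provides the host command that the new Utility/SRV.hs module falls back to when neither the ADNS nor the DNS Haskell library is built in; SRV records are how services such as XMPP advertise which host and port clients should connect to. A small usage sketch, assuming only the exported mkSRVTcp and lookupSRV; the service name and domain are example values:

	import Utility.SRV (mkSRVTcp, lookupSRV)

	-- Illustrative sketch: looks up "_xmpp-client._tcp.example.com".
	-- lookupSRV returns hosts ordered with highest priority first,
	-- or [] on any lookup error.
	main :: IO ()
	main = do
		hosts <- lookupSRV $ mkSRVTcp "xmpp-client" "example.com"
		mapM_ (putStrLn . fst) hosts
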
diff --git a/doc/assistant/pairing_walkthrough.mdwn b/doc/assistant/pairing_walkthrough.mdwn index 78bd91b..07b6399 100644 --- a/doc/assistant/pairing_walkthrough.mdwn +++ b/doc/assistant/pairing_walkthrough.mdwn @@ -10,7 +10,9 @@ We'll start on your computer, where you open up your git annex dashboard. `*click*` -[[!img pairing.png alt="Pair with local computer"]] +[[!img pairing.png alt="Pair with another computer"]] + +`*click*` Now the hard bit. You have to think up a secret phrase, and type it in, (and perhaps get the spelling correct). @@ -41,10 +43,18 @@ plugged into the same router. Also, the file sharing set up by this pairing only works when both computers are on the same network. If you go on a trip, any files you -edit will not be visible to your friend until you get back. **But**, -you can get around this by hooking both computers up to a server on the -internet, which they can use to exchange files while disconnected. The -git annex assistant makes that easy too. +edit will not be visible to your friend until you get back. + +To get around this, you'll often also want to set up +[[jabber_pairing|share_with_a_friend_walkthrough]], and a server +in the cloud, which they can use to exchange files while away. And also, you can pair with as many other computers as you like, not just one! + +## What does pairing actually do behind the scenes? + +It ensures that both repositories have correctly configured +[[remotes|walkthrough/adding_a_remote]] pointing to each other. +If you have already configured this manually, you do not need to +perform pairing. diff --git a/doc/assistant/release_notes.mdwn b/doc/assistant/release_notes.mdwn new file mode 100644 index 0000000..270897f --- /dev/null +++ b/doc/assistant/release_notes.mdwn @@ -0,0 +1,116 @@ +## version 3.20121012 + +This is a major upgrade of the git-annex assistant, which is still in beta. + +In general, anything you can configure with the assistant's web app +will work. Some examples of use cases supported by this release include: + +* [[Sharing repositories with friends|share_with_a_friend_walkthrough]] + contacted through a Jabber server (such as Google Talk). +* Setting up cloud repositories, that are used as backups, archives, + or transfer points between repositories that cannot directly contact + one-another. +* [[Pairing|pairing_walkthrough]] two computers that are on the same local + network (or VPN) and automatically keeping the files in the annex in + sync as changes are made to them. +* Cloning your repository to removable drives, USB keys, etc. The assistant + will notice when the drive is mounted and keep it in sync. + Such a drive can be stored as an offline backup, or transported between + computers to keep them in sync. + +The following upgrade notes apply if you're upgrading from a previous version: + +* For best results, edit the configuration of repositories you set + up with older versions, and place them in a repository group. + This lets the assistant know how you want to use the repository; for backup, + archival, as a transfer point for clients, etc. Go to Configuration -> + Manage Repositories, and click in the "configure" link to edit a repository's + configuration. +* If you set up a cloud repository with an older version, and have multiple + clients using it, you are recommended to configure an Jabber account, + so that clients can use it to communicate when sending data to the + cloud repository. 
Configure Jabber by opening the webapp, and going to + Configuration -> Configure jabber account +* When setting up local pairing, the assistant did not limit the paired + computer to accessing a single git repository. This new version does, + by setting GIT_ANNEX_SHELL_DIRECTORY in `~/.ssh/authorized_keys`. + +The following are known limitations of this release of the git-annex +assistant: + +* On Mac OSX and BSD operating systems, the assistant uses kqueue to watch + files. Kqueue has to open every directory it watches, so too many + directories will run it out of the max number of open files (typically + 1024), and fail. See [[bugs/Issue_on_OSX_with_some_system_limits]] + for a workaround. + +## version 3.20121009 + +This is a maintenance release of the git-annex assistant, which is still in +beta. + +In general, anything you can configure with the assistant's web app +will work. Some examples of use cases supported by this release include: + +* [[Pairing|pairing_walkthrough]] two computers that are on the same local + network (or VPN) and automatically keeping the files in the annex in + sync as changes are made to them. +* Cloning your repository to removable drives, USB keys, etc. The assistant + will notice when the drive is mounted and keep it in sync. + Such a drive can be stored as an offline backup, or transported between + computers to keep them in sync. +* Cloning your repository to a remote server, running ssh, and uploading + changes made to your files to the server. There is special support + for using the rsync.net cloud provider this way, or any shell account + on a typical unix server, such as a Linode VPS can be used. + +The following are known limitations of this release of the git-annex +assistant: + +* On Mac OSX and BSD operating systems, the assistant uses kqueue to watch + files. Kqueue has to open every directory it watches, so too many + directories will run it out of the max number of open files (typically + 1024), and fail. See [[bugs/Issue_on_OSX_with_some_system_limits]] + for a workaround. +* In order to ensure that all multiple repositories are kept in sync, + each computer with a repository must be running the git-annex assistant. +* The assistant does not yet always manage to keep repositories in sync + when some are hidden from others behind firewalls. + +## version 3.20120924 + +This is the first beta release of the git-annex assistant. + +In general, anything you can configure with the assistant's web app +will work. Some examples of use cases supported by this release include: + +* [[Pairing|pairing_walkthrough]] two computers that are on the same local + network (or VPN) and automatically keeping the files in the annex in + sync as changes are made to them. +* Cloning your repository to removable drives, USB keys, etc. The assistant + will notice when the drive is mounted and keep it in sync. + Such a drive can be stored as an offline backup, or transported between + computers to keep them in sync. +* Cloning your repository to a remote server, running ssh, and uploading + changes made to your files to the server. There is special support + for using the rsync.net cloud provider this way, or any shell account + on a typical unix server, such as a Linode VPS can be used. + +The following are known limitations of this release of the git-annex +assistant: + +* On Mac OSX and BSD operating systems, the assistant uses kqueue to watch + files. 
Kqueue has to open every directory it watches, so too many + directories will run it out of the max number of open files (typically + 1024), and fail. See [[bugs/Issue_on_OSX_with_some_system_limits]] + for a workaround. +* In order to ensure that all multiple repositories are kept in sync, + each computer with a repository must be running the git-annex assistant. +* The assistant does not yet always manage to keep repositories in sync + when some are hidden from others behind firewalls. +* If a file is checked into git as a normal file and gets modified + (or merged, etc), it will be converted into an annexed file. So you + should not mix use of the assistant with normal git files in the same + repository yet. +* If you `git annex unlock` a file, it will immediately be re-locked. + See [[bugs/watcher_commits_unlocked_files]]. diff --git a/doc/assistant/share_with_a_friend_walkthrough.mdwn b/doc/assistant/share_with_a_friend_walkthrough.mdwn new file mode 100644 index 0000000..a748ebd --- /dev/null +++ b/doc/assistant/share_with_a_friend_walkthrough.mdwn @@ -0,0 +1,58 @@ +Want to share all the files in your repository with a friend? + +Let's suppose you use Google Mail, and so does your friend, and you +sometimes also chat in Google Talk. The git-annex assistant will +use your Google account to share with your friend. (This actually +works with any Jabber account you use, not just Google Talk.) + +Start by opening up your git annex dashboard. + +[[!img pairing_walkthrough/addrepository.png alt="Add another repository button"]] + +`*click*` + +[[!img pairing.png alt="Share with a friend"]] + +`*click*` + +[[!img xmpp.png alt="Configuring Jabber account"]] + +Fill that out, and git-annex will be able to show you a list of your +friends. + +[[!img buddylist.png alt="Buddy list"]] + +This list will refresh as friends log on and off, so you can +leave it open in a tab until a friend is available to start pairing. + +(If your friend is not using git-annex yet, now's a great time to spread +the word!) + +Once you click on "Start Pairing", your friend will see this pop up +on their git annex dashboard. + +[[!img xmppalert.png alt="Pair request"]] + +Once your friend clicks on that, your repositories will be paired. + +### But, wait, there's one more step... + +Despite the repositories being paired now, you and your friend can't yet +quite share files. You'll start to see your friend's files show up in your +git-annex folder, but you won't be able to open them yet. + +What you need to do now is set up a repository out there in the cloud, +that both you and your friend can access. This will be used to transfer +files between the two of you. + +At the end of the pairing process, a number of cloud providers are +suggested, and the git-annex assistant makes it easy to configure one of +them. Once you or your friend sets it up, it'll show up in the other +one's list of repositories: + +[[!img repolist.png alt="Repository list"]] + +The final step is to share the login information for the cloud repository +with your friend, so they can enable it too. + +With that complete, you'll be able to open your friend's files! 
diff --git a/doc/assistant/share_with_a_friend_walkthrough/buddylist.png b/doc/assistant/share_with_a_friend_walkthrough/buddylist.png new file mode 100644 index 0000000..ce3d61a Binary files /dev/null and b/doc/assistant/share_with_a_friend_walkthrough/buddylist.png differ diff --git a/doc/assistant/share_with_a_friend_walkthrough/pairing.png b/doc/assistant/share_with_a_friend_walkthrough/pairing.png new file mode 100644 index 0000000..533f4ae Binary files /dev/null and b/doc/assistant/share_with_a_friend_walkthrough/pairing.png differ diff --git a/doc/assistant/share_with_a_friend_walkthrough/repolist.png b/doc/assistant/share_with_a_friend_walkthrough/repolist.png new file mode 100644 index 0000000..409da4a Binary files /dev/null and b/doc/assistant/share_with_a_friend_walkthrough/repolist.png differ diff --git a/doc/assistant/share_with_a_friend_walkthrough/xmppalert.png b/doc/assistant/share_with_a_friend_walkthrough/xmppalert.png new file mode 100644 index 0000000..5e2d562 Binary files /dev/null and b/doc/assistant/share_with_a_friend_walkthrough/xmppalert.png differ diff --git a/doc/assistant/xmpp.png b/doc/assistant/xmpp.png new file mode 100644 index 0000000..c3cc53e Binary files /dev/null and b/doc/assistant/xmpp.png differ diff --git a/doc/assistant/xmppnudge.png b/doc/assistant/xmppnudge.png new file mode 100644 index 0000000..b3a0658 Binary files /dev/null and b/doc/assistant/xmppnudge.png differ diff --git a/doc/assistant/xmpppairingend.png b/doc/assistant/xmpppairingend.png new file mode 100644 index 0000000..f0c9e76 Binary files /dev/null and b/doc/assistant/xmpppairingend.png differ diff --git a/doc/bare_repositories.mdwn b/doc/bare_repositories.mdwn index f40277d..dde74c6 100644 --- a/doc/bare_repositories.mdwn +++ b/doc/bare_repositories.mdwn @@ -25,15 +25,13 @@ Here is a quick example of how to set this up, using `origin` as the remote name On the server: - mkdir bare-annex - cd bare-annex - git init --bare + git init --bare bare-annex.git git annex init origin Now configure the remote and do the initial push: cd ~/annex - git remote add origin example.com:bare-annex + git remote add origin example.com:bare-annex.git git push origin master git-annex Now `git annex status` should show the configured bare remote. If it does not, you may have to pull from the remote first (older versions of `git-annex`) diff --git a/doc/bare_repositories/comment_1_148e1da70d37d311634a0309a4ff8dcd._comment b/doc/bare_repositories/comment_1_148e1da70d37d311634a0309a4ff8dcd._comment new file mode 100644 index 0000000..c1ba9f2 --- /dev/null +++ b/doc/bare_repositories/comment_1_148e1da70d37d311634a0309a4ff8dcd._comment @@ -0,0 +1,22 @@ +[[!comment format=mdwn + username="https://www.google.com/accounts/o8/id?id=AItOawmraN_ldJplGunVGmnjjLN6jL9s9TrVMGE" + nickname="Ævar Arnfjörð" + subject="How to convert bare repositories to non-bare" + date="2012-11-11T20:14:44Z" + content=""" +I made a repository bare and later wanted to convert it, this would have worked with just plain git: + + cd bare-repo.git + mkdir .git + mv .??* * .git/ + git config --unset core.bare + git reset --hard + +But because git-annex uses different hashing directories under bare repositories all the files in the repo will point to files you don't have. Here's how you can fix that up assuming you're using a backend that assigns unique hashes based on file content (e.g. 
the SHA256 backend): + + mv .git/annex/objects from-bare-repo + git annex add from-bare-repo + git rm -f from-bare-repo + + +"""]] diff --git a/doc/bugs/Building_fails:_Could_not_find_module___96__Text.Blaze__39__.mdwn b/doc/bugs/Building_fails:_Could_not_find_module___96__Text.Blaze__39__.mdwn new file mode 100644 index 0000000..b75d92e --- /dev/null +++ b/doc/bugs/Building_fails:_Could_not_find_module___96__Text.Blaze__39__.mdwn @@ -0,0 +1,105 @@ +What steps will reproduce the problem? + +
+dominik@Atlantis:/var/tmp$ git clone git://github.com/joeyh/git-annex.git
+Cloning into 'git-annex'...
+remote: Counting objects: 40580, done.
+remote: Compressing objects: 100% (10514/10514), done.
+remote: Total 40580 (delta 29914), reused 40502 (delta 29837)
+Receiving objects: 100% (40580/40580), 9.17 MiB | 238 KiB/s, done.
+Resolving deltas: 100% (29914/29914), done.
+dominik@Atlantis:/var/tmp$ cd git-annex/
+dominik@Atlantis:/var/tmp/git-annex$ cabal update
+Downloading the latest package list from hackage.haskell.org
+dominik@Atlantis:/var/tmp/git-annex$ cabal install --only-dependencies
+Resolving dependencies...
+All the requested packages are already installed:
+Use --reinstall if you want to reinstall anyway.
+dominik@Atlantis:/var/tmp/git-annex$ cabal configure
+Resolving dependencies...
+[ 1 of 21] Compiling Utility.FileSystemEncoding ( Utility/FileSystemEncoding.hs, dist/setup/Utility/FileSystemEncoding.o )
+[ 2 of 21] Compiling Utility.Applicative ( Utility/Applicative.hs, dist/setup/Utility/Applicative.o )
+[ 3 of 21] Compiling Utility.PartialPrelude ( Utility/PartialPrelude.hs, dist/setup/Utility/PartialPrelude.o )
+[ 4 of 21] Compiling Utility.UserInfo ( Utility/UserInfo.hs, dist/setup/Utility/UserInfo.o )
+[ 5 of 21] Compiling Utility.Monad    ( Utility/Monad.hs, dist/setup/Utility/Monad.o )
+[ 6 of 21] Compiling Utility.Path     ( Utility/Path.hs, dist/setup/Utility/Path.o )
+[ 7 of 21] Compiling Utility.OSX      ( Utility/OSX.hs, dist/setup/Utility/OSX.o )
+[ 8 of 21] Compiling Utility.Exception ( Utility/Exception.hs, dist/setup/Utility/Exception.o )
+[ 9 of 21] Compiling Utility.TempFile ( Utility/TempFile.hs, dist/setup/Utility/TempFile.o )
+[10 of 21] Compiling Utility.Misc     ( Utility/Misc.hs, dist/setup/Utility/Misc.o )
+[11 of 21] Compiling Utility.Process  ( Utility/Process.hs, dist/setup/Utility/Process.o )
+[12 of 21] Compiling Utility.FreeDesktop ( Utility/FreeDesktop.hs, dist/setup/Utility/FreeDesktop.o )
+[13 of 21] Compiling Assistant.Install.AutoStart ( Assistant/Install/AutoStart.hs, dist/setup/Assistant/Install/AutoStart.o )
+[14 of 21] Compiling Utility.SafeCommand ( Utility/SafeCommand.hs, dist/setup/Utility/SafeCommand.o )
+[15 of 21] Compiling Utility.Directory ( Utility/Directory.hs, dist/setup/Utility/Directory.o )
+[16 of 21] Compiling Common           ( Common.hs, dist/setup/Common.o )
+[17 of 21] Compiling Locations.UserConfig ( Locations/UserConfig.hs, dist/setup/Locations/UserConfig.o )
+[18 of 21] Compiling Build.TestConfig ( Build/TestConfig.hs, dist/setup/Build/TestConfig.o )
+[19 of 21] Compiling Build.Configure  ( Build/Configure.hs, dist/setup/Build/Configure.o )
+[20 of 21] Compiling Build.InstallDesktopFile ( Build/InstallDesktopFile.hs, dist/setup/Build/InstallDesktopFile.o )
+[21 of 21] Compiling Main             ( Setup.hs, dist/setup/Main.o )
+Linking ./dist/setup/setup ...
+  checking version... 3.20121018
+  checking git... yes
+  checking git version... 1.7.10.4
+  checking cp -a... yes
+  checking cp -p... yes
+  checking cp --reflink=auto... yes
+  checking uuid generator... uuidgen
+  checking xargs -0... yes
+  checking rsync... yes
+  checking curl... yes
+  checking wget... yes
+  checking bup... no
+  checking gpg... yes
+  checking lsof... yes
+  checking host... no
+  checking ssh connection caching... yes
+  checking sha1... sha1sum
+  checking sha256... sha256sum
+  checking sha512... sha512sum
+  checking sha224... sha224sum
+  checking sha384... sha384sum
+Configuring git-annex-3.20121018...
+dominik@Atlantis:/var/tmp/git-annex$ cabal build
+Building git-annex-3.20121018...
+Preprocessing executable 'git-annex' for git-annex-3.20121018...
+
+Assistant/Alert.hs:21:8:
+    Could not find module `Text.Blaze'
+    It is a member of the hidden package `blaze-markup-0.5.1.1'.
+    Perhaps you need to add `blaze-markup' to the build-depends in your .cabal file.
+    Use -v to see a list of the files searched for.
+
+ +What is the expected output? What do you see instead? + +I expect the latest git HEAD to build without an error message or provide me with a package I need to install. Instead the error above is shown. In fact the package requested is installed: + +
+dominik@Atlantis:/var/tmp/git-annex$ cabal install blaze-markup
+Resolving dependencies...
+All the requested packages are already installed:
+blaze-markup-0.5.1.1
+Use --reinstall if you want to reinstall anyway.
+
+ +What version of git-annex are you using? On what operating system? + +git HEAD, Ubuntu 12.10 + +Please provide any additional information below. + +
+$ cabal --version
+cabal-install version 0.14.0
+using version 1.14.0 of the Cabal library 
+
+$ ghc --version
+The Glorious Glasgow Haskell Compilation System, version 7.4.2
+
+$ uname -a
+Linux Atlantis 3.5.0-17-generic #28-Ubuntu SMP Tue Oct 9 19:31:23 UTC 2012 x86_64 x86_64 x86_64 GNU/Linux
+
+ +> [[done]] --[[Joey]] diff --git a/doc/bugs/Building_fails:_Not_in_scope:___96__myHomeDir__39___.mdwn b/doc/bugs/Building_fails:_Not_in_scope:___96__myHomeDir__39___.mdwn new file mode 100644 index 0000000..e1d2da4 --- /dev/null +++ b/doc/bugs/Building_fails:_Not_in_scope:___96__myHomeDir__39___.mdwn @@ -0,0 +1,56 @@ +What steps will reproduce the problem? + +Building of the current github HEAD fails with a strange error message regarding OSX. I'm not using OSX but Ubuntu 12.10, why is cabal trying to build these files? + +
+dominik@Atlantis:/var/tmp$ git clone git://github.com/joeyh/git-annex.git
+Cloning into 'git-annex'...
+remote: Counting objects: 40243, done.
+remote: Compressing objects: 100% (10568/10568), done.
+remote: Total 40243 (delta 29647), reused 40044 (delta 29449)
+Receiving objects: 100% (40243/40243), 9.12 MiB | 184 KiB/s, done.
+Resolving deltas: 100% (29647/29647), done.
+dominik@Atlantis:/var/tmp$ cd git-annex/
+dominik@Atlantis:/var/tmp/git-annex$ cabal update
+Downloading the latest package list from hackage.haskell.org
+dominik@Atlantis:/var/tmp/git-annex$ cabal install --only-dependencies
+Resolving dependencies...
+All the requested packages are already installed:
+Use --reinstall if you want to reinstall anyway.
+dominik@Atlantis:/var/tmp/git-annex$ cabal configure
+Resolving dependencies...
+[ 1 of 21] Compiling Utility.FileSystemEncoding ( Utility/FileSystemEncoding.hs, dist/setup/Utility/FileSystemEncoding.o )
+[ 2 of 21] Compiling Utility.Applicative ( Utility/Applicative.hs, dist/setup/Utility/Applicative.o )
+[ 3 of 21] Compiling Utility.PartialPrelude ( Utility/PartialPrelude.hs, dist/setup/Utility/PartialPrelude.o )
+[ 4 of 21] Compiling Utility.UserInfo ( Utility/UserInfo.hs, dist/setup/Utility/UserInfo.o )
+[ 5 of 21] Compiling Utility.Monad    ( Utility/Monad.hs, dist/setup/Utility/Monad.o )
+[ 6 of 21] Compiling Utility.Path     ( Utility/Path.hs, dist/setup/Utility/Path.o )
+[ 7 of 21] Compiling Utility.OSX      ( Utility/OSX.hs, dist/setup/Utility/OSX.o )
+
+Utility/OSX.hs:22:17: Not in scope: `myHomeDir'
+
+ +What is the expected output? What do you see instead? + +I expect cabal to build git-annex. + +What version of git-annex are you using? On what operating system? + +github HEAD on Ubuntu 12.10 + +Please provide any additional information below. + +
+$ cabal --version
+cabal-install version 0.14.0
+using version 1.14.0 of the Cabal library 
+
+$ ghc --version
+The Glorious Glasgow Haskell Compilation System, version 7.4.2
+
+$ uname -a
+Linux Atlantis 3.5.0-17-generic #28-Ubuntu SMP Tue Oct 9 19:31:23 UTC 2012 x86_64 x86_64 x86_64 GNU/Linux
+
+
+ +> [[fixed|done]] --[[Joey]] diff --git a/doc/bugs/Calls_to_rsync_don__39__t_always_use__annex-rsync-options.mdwn b/doc/bugs/Calls_to_rsync_don__39__t_always_use__annex-rsync-options.mdwn new file mode 100644 index 0000000..df1163b --- /dev/null +++ b/doc/bugs/Calls_to_rsync_don__39__t_always_use__annex-rsync-options.mdwn @@ -0,0 +1,35 @@ +What steps will reproduce the problem? + +Add a rsync special remote - one that you need a username/password to access (stored in text file $HOME/.rsync.password): + + $ git annex initremote myrsync type=rsync rsyncurl=rsync://username@rsync.example.com/myrsync encryption=none + $ git annex describe myrsync "rsync server" + $ git config remote.myrsync.annex-rsync-options "--password-file=$HOME/.rsync.password" + +Copy a file to the remote: + + $ git annex -d copy my-file --to myrsync + +What is the expected output? What do you see instead? + +Expect to see the file copied over to the rsync remote, but the check doesn't use the annex-rsync-options and asks for a password. The debug output is: + + copy my-file (checking myrsync...) [2012-10-28 01:01:01 EST] call: sh ["-c","rsync --quiet 'rsync://username@rsync.example.com/myrsync/[...SNIP...]' 2>/dev/null"] + +However the actual copy does use annex-rsync-options and the copy works: + + [2012-10-28 01:01:05 EST] read: rsync ["--password-file=/home/blah/.rsync.password","--progress","--recursive","--partial","--partial-dir=.rsync-partial","/home/blah/annex/.git/annex/tmp/rsynctmp/12345/","rsync://username@rsync.example.com/myrsync"] + + +What version of git-annex are you using? On what operating system? + +git-annex: 3.20121017 + +OS: Ubuntu 12.04 + +Please provide any additional information below. + +I think this fix is as easy as including the annex-rsync-options wherever rsync is called. + +> I belive there was only the one place this was neglected. [[done]] +> --[[Joey]] diff --git a/doc/bugs/Cannot_clone_an_annex.mdwn b/doc/bugs/Cannot_clone_an_annex.mdwn new file mode 100644 index 0000000..62e9ed2 --- /dev/null +++ b/doc/bugs/Cannot_clone_an_annex.mdwn @@ -0,0 +1,67 @@ +I have an annex that I use to store my digital photos. I had a few false +starts creating this annex, but now it's looking good on my server: + + root@titan.local:/tank/Media/Pictures# git annex status + supported backends: SHA256E SHA1E SHA512E SHA224E SHA384E SHA256 SHA1 SHA512 SHA224 SHA384 WORM URL + supported remote types: git S3 bup directory rsync web hook + trusted repositories: 0 + semitrusted repositories: 2 + 00000000-0000-0000-0000-000000000001 -- web + be88bc5a-17e2-11e2-a99b-d388d4437350 -- here (titan) + untrusted repositories: 0 + dead repositories: 5 + 0A9F3136-A12A-43C7-9BE2-33F59954FD52 -- vulcan + 57349F02-E497-4420-9230-6B15D8AB14EE -- vulcan + 6195C912-2707-4B75-AC8C-11C51FAA8FE0 -- vulcan + D51DEDC4-9255-4A99-8520-2B1CED337674 -- hermes + EE327B34-3E20-4B5B-8F0E-D500CBC9738D -- hermes + transfers in progress: none + available local disk space: unknown + local annex keys: 20064 + local annex size: 217 gigabytes + known annex keys: 21496 + known annex size: 217 gigabytes + bloom filter size: 16 mebibytes (4% full) + backend usage: + SHA256E: 41560 + root@titan.local:/tank/Media/Pictures# git annex unused + unused . (checking for unused data...) ok + +It passes `git annex fsck` without any problems. 
However, when I "git clone" +this annex to my desktop machine and then do a `git annex sync`, I see this: + + Vulcan /Volumes/tank/Media/Pictures (master) $ git annex status + supported backends: SHA256E SHA1E SHA512E SHA224E SHA384E SHA256 SHA1 SHA512 SHA224 SHA384 WORM URL + supported remote types: git S3 bup directory rsync web hook + trusted repositories: 0 + semitrusted repositories: 5 + 00000000-0000-0000-0000-000000000001 -- web + 0A9F3136-A12A-43C7-9BE2-33F59954FD52 -- vulcan + 274D3474-7A25-44CD-8368-CF11C451014F -- here (vulcan) + EE327B34-3E20-4B5B-8F0E-D500CBC9738D -- hermes + be88bc5a-17e2-11e2-a99b-d388d4437350 -- titan + untrusted repositories: 0 + dead repositories: 3 + 57349F02-E497-4420-9230-6B15D8AB14EE -- vulcan + 6195C912-2707-4B75-AC8C-11C51FAA8FE0 -- vulcan + D51DEDC4-9255-4A99-8520-2B1CED337674 -- hermes + transfers in progress: none + available local disk space: 1 terabyte (+1 megabyte reserved) + local annex keys: 0 + local annex size: 0 bytes + known annex keys: 21025 + known annex size: 217 gigabytes + bloom filter size: 16 mebibytes (0% full) + backend usage: + SHA256: 18707 + SHA256E: 2318 + +Where did all these `SHA256` keys come from? + +Why doesn't the known annex keys size match? + +Further, I cannot `git annex get` on most of the files, because it says that +the `SHA256` key is not present. + +It looks like I'll have to rollback my ZFS snapshots and start over, but I'm +wondering: how was I even able to create this situation? diff --git a/doc/bugs/Detection_assumes_that_shell_is_bash.mdwn b/doc/bugs/Detection_assumes_that_shell_is_bash.mdwn new file mode 100644 index 0000000..23298bf --- /dev/null +++ b/doc/bugs/Detection_assumes_that_shell_is_bash.mdwn @@ -0,0 +1,18 @@ +###What steps will reproduce the problem?### + +"Adding a remote server using ssh" and try to add a remote server where the account has ex. tcsh as loginshell + +###What is the expected output? What do you see instead?### + +To discover remote programs, it dumps away some born-shell code like: +"echo git-annex-probe loggedin;if which git-annex-shell; then echo git-annex-probe git-annex-shell; fi;if which rsync; then echo git-annex-probe rsync; fi;if which ~/.ssh/git-annex-shell; then echo git-annex-probe ~/.ssh/git-annex-shell; fi" + +just wrap it with a bash -c '..' and you know that its interpreted by bash. + +###What version of git-annex are you using? On what operating system?### + +git-annex version: 3.20121017 + +###Please provide any additional information below.### + +Not everyone has bash as there login-shell. diff --git a/doc/bugs/GPG_passphrase_repeated_prompt.mdwn b/doc/bugs/GPG_passphrase_repeated_prompt.mdwn new file mode 100644 index 0000000..085aede --- /dev/null +++ b/doc/bugs/GPG_passphrase_repeated_prompt.mdwn @@ -0,0 +1,24 @@ +#### What steps will reproduce the problem? + +1. Create a new repository with a directory +2. Add files +3. Select "Store your data in the cloud" with the "Remote server" option +4. Enter host, user, directory +5. Select "Use an encrypted rsync repository on the server" (Will there be an option for unencrypted later?) +6. GPG Passphrase prompt comes up for every file + +#### What is the expected output? What do you see instead? + +I expect to enter a passphase once and then it will sync all files with the remote server. + +Instead, it begins syncing the files to the server but prompts for a GPG passphase for every single file. + +#### What version of git-annex are you using? On what operating system? 
+ +3.20121017 precompiled binary on Arch Linux + +#### Please provide any additional information below. + +Not sure if I'm just missing a setting for GPG, but I would think I should only need to use the web app to configure the remote server. + +[[!tag /design/assistant]] diff --git a/doc/bugs/It_is_very_easy_to_turn_git-annex_into_a_zombie.mdwn b/doc/bugs/It_is_very_easy_to_turn_git-annex_into_a_zombie.mdwn new file mode 100644 index 0000000..dd92591 --- /dev/null +++ b/doc/bugs/It_is_very_easy_to_turn_git-annex_into_a_zombie.mdwn @@ -0,0 +1,23 @@ +What steps will reproduce the problem? + +Run the git-annex assitant, and then "sudo kill" it. + +What is the expected output? What do you see instead? + +I expect it to die, instead I end up with: + + 14604 ?? S 0:00.64 ga assistant + 14623 ?? Z 0:00.00 (git) + 14624 ?? Z 0:00.00 (git) + 14936 ?? Z 0:00.00 (git-annex) + +The only way to clear these zombies is to reboot. Perhaps there is some resource not being correctly terminated under exceptional conditions? + +Note that on OpenIndiana the problem is even more severe: Aborting git-annex at the wrong time leaves behind both zombie processes and lock files which cause the machine to suddenly halt if I try to access them in any way (via mv, rsync, etc)! + +What version of git-annex are you using? On what operating system? + +4d1e0c9 on OS X 10.8.2. + +Please provide any additional information below. + diff --git a/doc/bugs/Most_recent_git-annex_will_not_build_on_OpenIndiana.mdwn b/doc/bugs/Most_recent_git-annex_will_not_build_on_OpenIndiana.mdwn new file mode 100644 index 0000000..ee188eb --- /dev/null +++ b/doc/bugs/Most_recent_git-annex_will_not_build_on_OpenIndiana.mdwn @@ -0,0 +1,36 @@ +Version 3.20120825 built on my OpenIndiana system just fine, but the latest release gives me this during setup: + + Linking /tmp/git-annex-3.20121017-13013/git-annex-3.20121017/dist/setup/setup ... + checking version... 3.20121017 + checking git... yes + checking git version... 1.7.8.2 + checking cp -a... yes + checking cp -p... yes + checking cp --reflink=auto... yes + checking uuid generator... uuid -m + checking xargs -0... yes + checking rsync... yes + checking curl... yes + checking wget... yes + checking bup... no + checking gpg... no + checking lsof... no + checking ssh connection caching... yes + checking sha1... sha1sum + checking sha256... sha256sum + checking sha512... sha512sum + checking sha224... sha224sum + checking sha384... sha384sum + Configuring git-annex-3.20121017... + Building git-annex-3.20121017... + Preprocessing executable 'git-annex' for git-annex-3.20121017... + In file included from Mounts.hsc:25:0: + Utility/libmounts.h:13:3: warning: #warning mounts listing code not available for this OS [-Wcpp] + + Utility/libkqueue.c:13:23: + fatal error: sys/event.h: No such file or directory + compilation terminated. + +Is it possible to remove the new requirement? Thanks! + +> [[done]] --[[Joey]] diff --git a/doc/bugs/OSX_git-annex.app_error:__LSOpenURLsWithRole__40____41__.mdwn b/doc/bugs/OSX_git-annex.app_error:__LSOpenURLsWithRole__40____41__.mdwn new file mode 100644 index 0000000..cf73bec --- /dev/null +++ b/doc/bugs/OSX_git-annex.app_error:__LSOpenURLsWithRole__40____41__.mdwn @@ -0,0 +1,23 @@ +**What steps will reproduce the problem?** + +Either double click on the app or from the terminal + + $ open /Applications/git-annex.app + +**What is the expected output? What do you see instead?** + +I'd expect to see git-annex run. 
"git-annex" doesn't run and what I see (in the terminal) is: + + LSOpenURLsWithRole() failed with error -10810 for the file /Applications/git-annex.app. + +**What version of git-annex are you using? On what operating system?** + +*git-annex*: 3.20121017 + +*git-annex.app*: ??? + +*OS*: OSX 10.6.8 64 bit + + +**Please provide any additional information below.** + diff --git a/doc/bugs/Tries_to_upload_to_remote_although_remote_is_dead.mdwn b/doc/bugs/Tries_to_upload_to_remote_although_remote_is_dead.mdwn new file mode 100644 index 0000000..6b8f66d --- /dev/null +++ b/doc/bugs/Tries_to_upload_to_remote_although_remote_is_dead.mdwn @@ -0,0 +1,51 @@ +What steps will reproduce the problem? + +I added a (encrypted) ssh remote and everything worked fine. Now I marked the remote as dead, but git-annex still tries to upload to this remote. I recognize this because it asks for my ssh and gpg keys passwords. + +While transfering (or asking for the password), `git annex status` shows the following: +
+supported backends: SHA256E SHA1E SHA512E SHA224E SHA384E SHA256 SHA1 SHA512 SHA224 SHA384 WORM URL
+supported remote types: git S3 bup directory rsync web hook
+trusted repositories: 0
+semitrusted repositories: 2
+	00000000-0000-0000-0000-000000000001 -- web
+ 	cd16b9c6-f464-11e1-9845-8749687232d2 -- here (Dell)
+untrusted repositories: 0
+dead repositories: 7
+	11379fa0-ecd6-49e2-9bec-24fc19cc7b9f -- vserver.dbruhn.de_annex
+ 	2195e036-d2ef-4357-8c89-a9aaec23ebdc -- vserver-plain
+ 	4d066ea1-fb9f-45fd-990a-5c5c836f530e -- inTmp
+ 	bb276045-6ba6-488f-88d0-39a3c5f5134d -- vserver-enc
+ 	c49f3372-3fcf-49fc-b626-73ba4454c172 -- annexBare (bareAnnex)
+ 	e52645b3-bfb6-457d-b281-967353919e29 -- AnnexUSBFAT
+ 	ea3d6acc-716c-48e8-9b6b-993b90dcc1db -- vserver2
+transfers in progress: 
+	uploading Schmidt/somefile.m4a
+
+
+ to vserver2
+available local disk space: 43 gigabytes (+1 megabyte reserved)
+temporary directory size: 389 megabytes (clean up with git-annex unused)
+local annex keys: 23
+local annex size: 396 megabytes
+known annex keys: 19
+known annex size: 396 megabytes
+bloom filter size: 16 mebibytes (0% full)
+backend usage: 
+	SHA256E: 42
+
+</pre>
+
+As you can see, the `vserver2` remote is marked as dead but git-annex still tries to upload. This problem keeps occurring even after restarts.
+
+What is the expected output? What do you see instead?
+
+If I understand the `dead` status correctly, git-annex should not use these remotes.
+
+
+What version of git-annex are you using? On what operating system?
+
+git-annex HEAD from yesterday's git. Ubuntu 12.10
+
+Please provide any additional information below.
+
diff --git a/doc/bugs/__34__drop__34___deletes_all_files_with_identical_content.mdwn b/doc/bugs/__34__drop__34___deletes_all_files_with_identical_content.mdwn
new file mode 100644
index 0000000..bdf4876
--- /dev/null
+++ b/doc/bugs/__34__drop__34___deletes_all_files_with_identical_content.mdwn
@@ -0,0 +1,49 @@
+# What steps will reproduce the problem?
+
+    echo "TEST CONTENT" > fileA
+    cp fileA fileB
+    git annex add file{A,B}
+    git annex drop fileA --force
+    cat fileB
+
+# What is the expected output? What do you see instead?
+
+## expected:
+
+--> TEST CONTENT
+
+## observed:
+
+--> cat: fileB: No such file or directory
+
+
+# What version of git-annex are you using? On what operating system?
+
+git-annex version: 3.20121017
+
+# Please provide any additional information below.
+
+I really like git-annex's feature of storing the same content only once. But as this happens transparently (i.e. the user does not need to know, nor is he told, that contents are identical, which is very comfortable, of course), the "git annex drop" function is broken: it effectively deletes (seemingly) random files WITHOUT notifying the user.
+
+
+# Possible solution?
+
+One simple solution would be to use the "git annex find" functionality to see who else uses the file and NOT delete it.
+
+But this still leaves a problem:
+
+Consider the following variation of the above example and assume that "drop" does not delete content that is still used (i.e. implementing the above solution).
+
+    echo "TEST CONTENT" > fileA
+    cp fileA fileB
+    git annex add file{A,B}
+    git rm fileB
+    git annex drop fileA --force
+    git checkout --force
+    cat fileB
+--> cat: fileB: No such file or directory
+
+Here again, the problem is that the user would probably (correct me if I am wrong) expect that fileB still exists, because removing a file and checking it out again is expected to not mess with the annex contents (?). He does not know that "git annex drop fileA" actually dropped fileB's contents, because there was no additional file linking to it. It effectively performed a "git annex dropunused".
+
+> We seem to have agreed this is reasonable behavior, and a doc change was done.
+> Do feel free to suggest other doc changes. [[done]] --[[Joey]]
diff --git a/doc/bugs/acl_not_honoured_in_rsync_remote.mdwn b/doc/bugs/acl_not_honoured_in_rsync_remote.mdwn
new file mode 100644
index 0000000..b5f92a9
--- /dev/null
+++ b/doc/bugs/acl_not_honoured_in_rsync_remote.mdwn
@@ -0,0 +1,57 @@
+in a setup where an rsync(+gnupg) remote is shared among different users of the same git-annex repository (ie, the people copying to there use different accounts on the rsync server), acls are not honored under some circumstances.
+
+the error message reads as follows:
+
+    copy …filename… (to prometheus...) 
Reading passphrase from file descriptor 11 + + sending incremental file list + rsync: recv_generator: mkdir "/home/shared/photos/encrypted_storage/9a6/0ff" failed: Permission denied (13) + *** Skipping any contents from this failed directory *** + 9a6/0ff/ + rsync error: some files/attrs were not transferred (see previous errors) (code 23) at main.c(1070) [sender=3.0.8] + + sent 185 bytes received 18 bytes 135.33 bytes/sec + total size is 2119419 speedup is 10440.49 + + This could have failed because --fast is enabled. + failed + +the acl used in my particular case is: + + # file: . + # owner: chrysn + # group: chrysn + user::rwx + group::rwx + group:family:rwx + mask::rwx + other::r-x + default:user::rwx + default:group::rwx + default:group:family:rwx + default:mask::rwx + default:other::r-x + +sub-directories are observed to have diverging permissions, though: + + # file: 794 + # owner: chrysn + # group: chrysn + user::rwx + group::rwx #effective:r-x + group:family:rwx #effective:r-x + mask::r-x + other::r-x + default:user::rwx + default:group::rwx + default:group:family:rwx + default:mask::rwx + default:other::r-x + +something seems to apply the umask (default 022) and revoke group write access from the files, overruling the acl. this is not what a umask is normally used for, and smells of [coreutils slavishly observing posix specs that don't consider all features](http://savannah.gnu.org/bugs/?19546) -- the observed effect is exactly what's described there. + +the git annex version used is 3.20121017 as in debian, the receiving site uses rsync 3.0.7; the affected directories come from a time when these very versions are known to have been used. + +this is probably not a bug of git-annex alone, but affects its operation and might be solvable by invoking rsync differently. + +(this is kind of a follow-up on [[forum/__34__permission_denied__34___in_fsck_on_shared_repo]]) diff --git a/doc/bugs/archiving_git_repositories.mdwn b/doc/bugs/archiving_git_repositories.mdwn new file mode 100644 index 0000000..1753c10 --- /dev/null +++ b/doc/bugs/archiving_git_repositories.mdwn @@ -0,0 +1 @@ +In a true dropbox-like fashion, I tried to import my entire homefolder into the git-annex assistant. However, it seems that git-annex breaks on the several git repositories I've got checked out in my "Projects" folder. Is this a possible use case, or should I look at other tools to perform this with? diff --git a/doc/bugs/build_problem_on_OSX.mdwn b/doc/bugs/build_problem_on_OSX.mdwn new file mode 100644 index 0000000..e859a11 --- /dev/null +++ b/doc/bugs/build_problem_on_OSX.mdwn @@ -0,0 +1,18 @@ +I just squelched a bunch of build issues (to do with dependancies) on my autobuilder for OSX, this is currently happening + +
+install -d tmp
+ghc -O2 -Wall -outputdir tmp -IUtility  -DWITH_ASSISTANT -DWITH_S3 -DWITH_WEBAPP -DWITH_PAIRING -DWITH_XMPP -DWITH_DNS -DWITH_KQUEUE -threaded --make git-annex -o tmp/git-annex Utility/libdiskfree.o Utility/libmounts.o Utility/libkqueue.o
+
+Assistant/Threads/NetWatcher.hs:29:0:
+     warning: #warning Building without dbus support; will poll for network connection changes
+
+Assistant/Threads/MountWatcher.hs:36:0:
+     warning: #warning Building without dbus support; will use mtab polling
+[ 29 of 259] Compiling Utility.OSX      ( Utility/OSX.hs, tmp/Utility/OSX.o )
+
+Utility/OSX.hs:22:17: Not in scope: `myHomeDir'
+make: *** [git-annex] Error 1
+
+ +> Someone else reported that too; I fixed it. [[done]] --[[Joey]] diff --git a/doc/bugs/creds_directory_not_automatically_created.mdwn b/doc/bugs/creds_directory_not_automatically_created.mdwn new file mode 100644 index 0000000..d4e436d --- /dev/null +++ b/doc/bugs/creds_directory_not_automatically_created.mdwn @@ -0,0 +1,3 @@ +I just compiled ff7810eb83d8372e6206d487c63482d678e0b3d4 and created a new git-annex repository through the setup steps of "git-annex webapp". Then I tried configuring a Jabber account from the webapp. It then failed to create $REPO/.git/annex/creds/xmpp with a "No such file or directory" message because $REPO/.git/annex/creds did not get created. After doing a manual mkdir the Jabber setup went through fine. + +> [[Fixed|done]], thanks. --[[Joey]] diff --git a/doc/bugs/git-annex:_getUserEntryForID:_failed___40__Success__41__.mdwn b/doc/bugs/git-annex:_getUserEntryForID:_failed___40__Success__41__.mdwn new file mode 100644 index 0000000..976109c --- /dev/null +++ b/doc/bugs/git-annex:_getUserEntryForID:_failed___40__Success__41__.mdwn @@ -0,0 +1,13 @@ +What steps will reproduce the problem? + Start "./git-annex-webapp" + +What is the expected output? What do you see instead? + The webapp should start, but I get the error "git-annex: getUserEntryForID: failed (Success)" + +What version of git-annex are you using? On what operating system? + 3.20121017 on "Ubuntu 10.04.4 LTS" 32-Bit + +Please provide any additional information below. + + +> [[fixed|done]] --[[Joey]] diff --git a/doc/bugs/gpg_bundled_with_OSX_build_fails.mdwn b/doc/bugs/gpg_bundled_with_OSX_build_fails.mdwn new file mode 100644 index 0000000..81090fd --- /dev/null +++ b/doc/bugs/gpg_bundled_with_OSX_build_fails.mdwn @@ -0,0 +1,20 @@ +What steps will reproduce the problem? + +run + + /Applications/git-annex.app/Contents/MacOS/bin/gpg + +from the terminal + +What is the expected output? What do you see instead? + +I expect to see typical gpg output. Instead, I see + + dyld: Library not loaded: /opt/local/lib/libiconv.2.dylib + Referenced from: /Applications/git-annex.app/Contents/MacOS/bin/gpg + Reason: Incompatible library version: gpg requires version 8.0.0 or later, but libiconv.2.dylib provides version 7.0.0 + Trace/BPT trap: 5 + +What version of git-annex are you using? On what operating system? + +git annex Version: 3.20121017 on Mac OS X 10.7.5 diff --git a/doc/bugs/migrated_files_not_showing_up_in_unused_list.mdwn b/doc/bugs/migrated_files_not_showing_up_in_unused_list.mdwn new file mode 100644 index 0000000..e76448b --- /dev/null +++ b/doc/bugs/migrated_files_not_showing_up_in_unused_list.mdwn @@ -0,0 +1,59 @@ +Word summary: After migrating from SHA256 to SHA256E I still have a ton of SHA256 files around that aren't being found by unused. + +Command outputs (see, specifically, the output of status showing number of SHA256 and SHA256E files): + + + greg@eeepc:/mnt/blackbox/Media/Pictures/Photos$ less .gitattributes + * annex.backend=SHA256E + + greg@eeepc:/mnt/blackbox/Media/Pictures/Photos$ git-annex migrate . + (Recording state in git...) + + greg@eeepc:/mnt/blackbox/Media/Pictures/Photos$ git-annex unused + unused . (checking for unused data...) (checking master...) (checking rose/master...) (checking x200s/master...) + Some partially transferred data exists in temporary files: + NUMBER KEY + 1 SHA256E-s15766010--8132a02a8b245eb9842e89c5e696df4e9c82d676f8dec3c6bb96892c19f99d51.jpg + + To remove unwanted data: git-annex dropunused NUMBER + + ok + (Recording state in git...) 
+ + greg@eeepc:/mnt/blackbox/Media/Pictures/Photos$ git-annex status + supported backends: SHA256E SHA1E SHA512E SHA224E SHA384E SHA256 SHA1 SHA512 SHA224 SHA384 WORM URL + supported remote types: git S3 bup directory rsync web hook + trusted repositories: 1 + c0e4106e-2631-11e2-9749-1bfa37a61069 -- rose + semitrusted repositories: 3 + 00000000-0000-0000-0000-000000000001 -- web + 9bd4077e-196c-11e2-9cc9-9faafb3e34ee -- x200s + c69d6fcc-18d1-11e2-9487-2fe6dbf0516b -- here (photos on eeepc) + untrusted repositories: 0 + dead repositories: 1 + 3ebd5ac2-2092-11e2-856a-bb0203cce179 -- Photos on Rose + transfers in progress: + downloading 2011/06/30/IMG_8180.jpg + from rose + available local disk space: 2 terabytes (+1 megabyte reserved) + temporary directory size: 9 megabytes (clean up with git-annex unused) + local annex keys: 36210 + local annex size: 136 gigabytes + known annex keys: 23388 + known annex size: 102 gigabytes + bloom filter size: 16 mebibytes (7.2% full) + backend usage: + SHA256E: 37453 + SHA256: 22145 + (Recording state in git...) + + greg@eeepc:/mnt/blackbox/Media/Pictures/Photos$ apt-cache policy git-annex + git-annex: + Installed: 3.20121017 + Candidate: 3.20121017 + Version table: + *** 3.20121017 0 + 600 http://ftp.us.debian.org/debian/ unstable/main i386 Packages + 100 /var/lib/dpkg/status + 3.20120629 0 + 650 http://ftp.us.debian.org/debian/ wheezy/main i386 Packages diff --git a/doc/bugs/three_character_directories_created.mdwn b/doc/bugs/three_character_directories_created.mdwn new file mode 100644 index 0000000..68dd7bc --- /dev/null +++ b/doc/bugs/three_character_directories_created.mdwn @@ -0,0 +1,51 @@ +What steps will reproduce the problem? + +I don't know how, but this happened already a second time, I don't know how to reproduce this yet. + +What is the expected output? What do you see instead? + +There are many three character name directories created that look like the parts of a hash - and each contains a logfile. + + % ls + 027 + 1d1 + 1e3 + 232 + 36e + 583 + 5f6 + 69c + 9ea + bd7 + c46 + d20 + d48 + f31 + f88 + uuid.log + [+the intended data directories] + + % find 027/ + 027/ + 027//b73 + 027//b73/SHA256-s10108928--3c3766aed8b66de9d0ef37820e0ddfba25f9463b37f30e25ceb5ce3cdf12db36.log + + % cat f88/7c3/SHA256-s4100608--903530747dfdc7bc9d487d7cbd8ab09ddc1ffad52c08849d049c8a5ff5cfb854.log + 1351711677.187589s 1 2efd46d2-0e32-11e2-95fe-f73f09c6615e + 1351971337.667243s 1 ab50cd8a-11c0-11e2-934c-87e45f64e5c6 + +What version of git-annex are you using? On what operating system? + +% git-annex version +git-annex version: 3.20121017 +local repository version: 3 +default repository version: 3 +supported repository versions: 3 +upgrade supported from repository versions: 0 1 2 + +OS X 10.6.8 + +Please provide any additional information below. + +I use a symlink to the repository to change into it. + diff --git a/doc/bugs/uninit_loses_data_if_git-annex_add_didn__39__t_complete.mdwn b/doc/bugs/uninit_loses_data_if_git-annex_add_didn__39__t_complete.mdwn new file mode 100644 index 0000000..61cfa89 --- /dev/null +++ b/doc/bugs/uninit_loses_data_if_git-annex_add_didn__39__t_complete.mdwn @@ -0,0 +1,15 @@ +* Create a git-annex repo where a lot of files live. +* Start an annex add. +* Kill it. +* git-annex uninit + +Now, whatever files were annexed (ie: moved to .git/annex/objects/) but not committed are lost as the .git/annex directory was deleted. + +I know there are two conflicting issues here: + +1. 
if there is legitimately unused data in .git/annex then that shouldn't be unannexed +2. the above case where some files were annexed but not committed should be unannexed + +Maybe uninit could check to see if all symlinks currently in the repo pointing to something under .git/annex/objects are committed. If not, commit them then uninit, or just de-annex them. + +> Added a check for this, [[done]] --[[Joey]] diff --git a/doc/bugs/unlock_not_working_on_os_x_10.6_-_cp:_illegal_option_--_-_.mdwn b/doc/bugs/unlock_not_working_on_os_x_10.6_-_cp:_illegal_option_--_-_.mdwn new file mode 100644 index 0000000..3704334 --- /dev/null +++ b/doc/bugs/unlock_not_working_on_os_x_10.6_-_cp:_illegal_option_--_-_.mdwn @@ -0,0 +1,22 @@ +What steps will reproduce the problem? + + try to unlock a file in a git annex checkout + +What is the expected output? What do you see instead? + + % git annex unlock FILENAME + unlock FILENAME (copying...) cp: illegal option -- - + usage: cp [-R [-H | -L | -P]] [-fi | -n] [-apvX] source_file target_file + cp [-R [-H | -L | -P]] [-fi | -n] [-apvX] source_file ... target_directory + git-annex: copy failed! + + (should unlock the file) + +What version of git-annex are you using? On what operating system? + + latest git annex osx build as of yesterday (12-11-03) + + +> I've made the `cp` command be included in the OSX standalone build, +> so it will use the same one it's built with. So the next time we get +> an OSX build this will be fixed. [[done]] --[[Joey]] diff --git a/doc/bugs/using_old_remote_format_generates_irritating_output.mdwn b/doc/bugs/using_old_remote_format_generates_irritating_output.mdwn new file mode 100644 index 0000000..d53410c --- /dev/null +++ b/doc/bugs/using_old_remote_format_generates_irritating_output.mdwn @@ -0,0 +1,28 @@ +a special remote (encrypted rsync) that got copied to long ago (not sure when, there are old files that already have sizes in their unencrypted file names) seems to use the aa/bb/GPGHMACSHA1-- format instead of aaa/bbb/GPGHMACSHA1-. ``git annex fsck`` over such files produces very irritating output: + + +fsck L1100423.JPG (gpg) (checking …remote…...) +rsync: change_dir "…somewhere…/0a0/8cd/GPGHMACSHA1--91234b770b34eeff811d09c97ce94bb2398b3d72" failed: No such file or directory (2) + +sent 8 bytes received 12 bytes 40.00 bytes/sec +total size is 0 speedup is 0.00 +rsync error: some files/attrs were not transferred (see previous errors) (code 23) at main.c(1536) [Receiver=3.0.9] + + rsync failed -- run git annex again to resume file transfer + +GPGHMACSHA1--91234b770b34eeff811d09c97ce94bb2398b3d72 + 3922730 100% 623.81kB/s 0:00:06 (xfer#1, to-check=0/1) + +sent 30 bytes received 3923328 bytes 523114.40 bytes/sec +total size is 3922730 speedup is 1.00 +(checksum...) ok + + +(observed with debian's git-annex 3.20121017). + +while this does output an "ok" at th end and a zero exit status, having such messages in an fsck is highly irritating. + +i see two ways to enhance the situation: + +* silence the "not found" error when the file is found in another location +* a way to rename the files in the remote (i guess the aaa/bbb part can be derived from the file name; in that case, that could even be done w/o network interaction). 
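The first suggestion above (stay quiet about a miss in one layout when the key turns up in the other) is essentially a small piece of control flow. Here is a minimal Haskell sketch of that idea, not git-annex's actual code: the `findKeyLocation` name is invented, the caller is assumed to supply the candidate directory fragments for the new and the old layout (their real derivation from the key is not shown), and a local `doesFileExist` stands in for what would really be a probe over rsync.

    import System.Directory (doesFileExist)
    import System.FilePath ((</>))

    -- Try each candidate layout in turn and return the first location that
    -- actually holds the key. Misses stay silent; only a Nothing result
    -- should be reported to the user as an error.
    findKeyLocation :: FilePath -> FilePath -> [FilePath] -> IO (Maybe FilePath)
    findKeyLocation remoteRoot keyName = go
      where
        go [] = return Nothing
        go (dir:rest) = do
            let path = remoteRoot </> dir </> keyName
            exists <- doesFileExist path
            if exists
                then return (Just path)
                else go rest

Under that scheme, fsck would probe the new-style fragment first and the old-style fragment second, and only complain if both probes come back empty.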
diff --git a/doc/bugs/whereis_outputs_no_informaiton_for_unlocked_files.mdwn b/doc/bugs/whereis_outputs_no_informaiton_for_unlocked_files.mdwn new file mode 100644 index 0000000..cb85199 --- /dev/null +++ b/doc/bugs/whereis_outputs_no_informaiton_for_unlocked_files.mdwn @@ -0,0 +1,44 @@ +What steps will reproduce the problem? + + ...:/tmp$ mkdir repro + ...:/tmp$ cd repro/ + ...:/tmp/repro$ git init + Initialized empty Git repository in /tmp/repro/.git/ + ...:/tmp/repro$ git annex init test + init test ok + ...:/tmp/repro$ echo "A" > a.txt + ...:/tmp/repro$ git annex add a.txt + add a.txt (checksum...) ok + (Recording state in git...) + ...:/tmp/repro$ git commit -m "add file" + [master (root-commit) bf53ce2] add file + 1 file changed, 1 insertion(+) + create mode 120000 a.txt + ...:/tmp/repro$ git annex whereis a.txt + whereis a.txt (1 copy) + 5c028c6a-2c5e-11e2-bb9c-17bd7ce81377 -- here (test) + ok + ...:/tmp/repro$ git annex unlock a.txt + unlock a.txt (copying...) ok + ...:/tmp/repro$ git annex whereis a.txt + +What is the expected output? What do you see instead? + + I'd expect that whereis executed on an unlocked file would behave like whereis executed on a locked file. + +What version of git-annex are you using? On what operating system? + + $ cat /etc/issue + Ubuntu 12.04.1 LTS \n \l + + $ git-annex version + git-annex version: 3.20120406 + local repository version: 3 + default repository version: 3 + supported repository versions: 3 + upgrade supported from repository versions: 0 1 2 + + $ uname -a + Linux ... 3.2.0-31-generic #50-Ubuntu SMP Fri Sep 7 16:16:45 UTC 2012 x86_64 x86_64 x86_64 GNU/Linux + +Please provide any additional information below. diff --git a/doc/coding_style.mdwn b/doc/coding_style.mdwn new file mode 100644 index 0000000..ae76d23 --- /dev/null +++ b/doc/coding_style.mdwn @@ -0,0 +1,92 @@ +If you do nothing else, avoid use of partial functions from the Prelude! +`import Utility.PartialPrelude` helps avoid this by defining conflicting +functions for all the common ones. Also avoid `!!`, it's partial too. + +Use tabs for indentation. The one exception to this rule are +the Hamlet format files in `templates/*`. Hamlet, infuriatingly, refuses +to allow tabs to be used for indentation. + +Code should make sense with any tab stop setting, but 8 space tabs are +the default. With 8 space tabs, code should not exceed 80 characters +per line. (With larger tabs, it may of course.) + +Use spaces for layout. For example, here spaces (indicated with `.`) +are used after the initial tab to make the third test line up with +the others. + + when (foo_test || bar_test || + ......some_other_long_test) + print "hi" + +As a special Haskell-specific rule, "where" clauses are indented with two +spaces, rather than a tab. This makes them stand out from the main body +of the function, and avoids excessive indentation of the where cause content. +The definitions within the where clause should be put on separate lines, +each indented with a tab. + + foo = do + foo + bar + foo + where + foo = ... + bar = ... + +Where clauses for instance definitions and modules tend to appear at the end +of a line, rather than on a separate line. + + module Foo (Foo, mkFoo, unFoo) where + instance MonadBaseControl IO Annex where + +When a function's type signature needs to be wrapped to another line, +it's typical to switch to displaying one parameter per line. 
+ + foo :: Bar -> Baz -> (Bar -> Baz) -> IO Baz + + foo' + :: Bar + -> Baz + -> (Bar -> Baz) + -> IO Baz + +Note that the "::" then starts its own line. It is not put on the same +line as the function name because then it would not be guaranteed to line +up with the "->" at all tab width settings. Similarly, guards are put +on their own lines: + + splat i + | odd i = error "splat!" + | otherwise = i + +Multiline lists and record syntax are written with leading commas, +that line up with the open and close punctuation. + + list = + [ item1 + , item2 + , item3 + ] + + foo = DataStructure + { name = "bar" + , address = "baz" + } + +Module imports are separated into two blocks, one for third-party modules, +and one for modules that are part of git-annex. (Additional blocks can be used +if it makes sense.) + +Using tabs for indentation makes use of `let .. in` particularly tricky. +There's no really good way to bind multiple names in a let clause with +tab indentation. Instead, a where clause is typically used. To bind a single +name in a let clause, this is sometimes used: + + foo = let x = 42 + in x + (x-1) + x + +----- + +If you feel that this coding style leads to excessive amounts of horizontal +or vertical whitespace around your code, making it hard to fit enough of it +on the screen, consider finding a better abstraction, so the code that +does fit on the screen is easily understandable. ;) diff --git a/doc/design.mdwn b/doc/design.mdwn index dc66d5c..6e45df5 100644 --- a/doc/design.mdwn +++ b/doc/design.mdwn @@ -2,3 +2,5 @@ git-annex's high-level design is mostly inherent in the data that it stores in git, and alongside git. See [[internals]] for details. See [[encryption]] for design of encryption elements. + +See [[assistant]] for the design site for the git-annex [[/assistant]]. diff --git a/doc/design/assistant.mdwn b/doc/design/assistant.mdwn index f3e35c3..ccc217a 100644 --- a/doc/design/assistant.mdwn +++ b/doc/design/assistant.mdwn @@ -8,11 +8,11 @@ and use cases to add. Feel free to chip in with comments! --[[Joey]] * Month 1 "like dropbox": [[!traillink inotify]] [[!traillink syncing]] * Month 2 "shiny webapp": [[!traillink webapp]] [[!traillink progressbars]] * Month 3 "easy setup": [[!traillink configurators]] [[!traillink pairing]] +* Month 4 "cloud": [[!traillink cloud]] [[!traillink transfer_control]] We are, approximately, here: -* Month 4 "cloud": [[!traillink cloud]] [[!traillink transfer_control]] -* Month 5 user-driven features (see [[polls]]) +* Month 5 "cloud continued": [[!traillink xmpp]] [[polls]] * Months 6-7 "9k bonus round": [[!traillink Android]] [[!traillink partial_content]] [[!traillink leftovers]] * Months 8-11: more user-driven features and polishing (see remaining TODO items in all pages above) * Month 12: "Windows purgatory" [[Windows]] @@ -26,6 +26,7 @@ We are, approximately, here: * [[desymlink]] * [[deltas]] * [[leftovers]] +* [[other todo items|todo]] ## blog diff --git a/doc/design/assistant/blog/day_108__another_zombie_outbreak.mdwn b/doc/design/assistant/blog/day_108__another_zombie_outbreak.mdwn new file mode 100644 index 0000000..ee46073 --- /dev/null +++ b/doc/design/assistant/blog/day_108__another_zombie_outbreak.mdwn @@ -0,0 +1,33 @@ +I released git-annex an unprecidented two times yesterday, because just +after the first release, I learned of a another zombie problem. Turns out +this zombie had existed for a while, but it was masked by zombie reaping +code that I removed recently, after fixing most of the other zombie +problems. 
This one, though, is not directly caused by git-annex. When rsync +runs ssh, it seems to run two copies, and one is left unwaited on as a +zombie. Oddly, this only happens when rsync's stdout is piped into +git-annex, for progress bar handling. I have not source-dived rsync's code +to get to the bottom of this, but I put in a workaround. + +I did get to the bottom of yesterday's runaway dbus library. Got lucky and +found the cause of the memory leak in that library on the first try, which +is nice since each try involved logging out of X. I've been corresponding +with its author, and a fix will be available soon, and then git-annex will +need some changes to handle dbus reconnection. + +----- + +For the first time, I'm starting to use the assistant on my own personal +git-annex repo. The preferred content and group settings let me configure it +use the complex system of partial syncing I need. For example, I have this +configured for my sound files, keeping new podcasts on a server until they land +somewhere near me. And keeping any sound files that I've manually put on my +laptop, and syncing new podcasts, but not other stuff. + + # (for my server) + preferred-content 87e06c7a-7388-11e0-ba07-03cdf300bd87 = include=podcasts/* and (not copies=nearjoey:1) + # (for my laptop) + preferred-content 0c443de8-e644-11df-acbf-f7cd7ca6210d = exclude=*/out/* and (in=here or (include=podcasts/*)) + +Found and fixed a bug in the preferred content matching code, where +if the assistant was run in a subdirectory of the repo, it failed to +match files correctly. diff --git a/doc/design/assistant/blog/day_109__dropping.mdwn b/doc/design/assistant/blog/day_109__dropping.mdwn new file mode 100644 index 0000000..210f71f --- /dev/null +++ b/doc/design/assistant/blog/day_109__dropping.mdwn @@ -0,0 +1,16 @@ +Got unwanted content to be dropped from the local repo, as well as remotes +when doing the expensive scan. I kept the scan a single pass for now, +need to revisit that later to drop content before transferring more. +Also, when content is downloaded or uploaded, this can result in it +needing to be dropped from somewhere, and the assistant handles that too. + +There are some edge cases with hypothetical, very weird preferred +content expressions, where the assistant won't drop content right away. +(But will later in the expensive scan.) Other than those, I think I have +nearly all content dropping sorted out. The only common case I know of where +unwanted content is not dropped by the assistant right away is when a file +is renamed (eg, put in a "Trash" directory). + +In other words, repositories put into the transfer group will now work as +described, only retaining content as long as is needed to distribute it to +clients. Big milestone! diff --git a/doc/design/assistant/blog/day_110__more_dropping.mdwn b/doc/design/assistant/blog/day_110__more_dropping.mdwn new file mode 100644 index 0000000..afa256c --- /dev/null +++ b/doc/design/assistant/blog/day_110__more_dropping.mdwn @@ -0,0 +1,55 @@ +Got preferred content checked when files are moved around. +So, in repositories in the default client group, if you make a +"archive" directory and move files to it, the assistant will drop +their content (when possible, ie when it's reached an archive or backup). +Move a file out of an archive directory, and the assistant will get its +content again. Magic. + +Found an intractable bug, obvious in retrospect, with the git-annex branch +read cache, and had to remove that cache. 
I have not fully determined
+if this will slow down git-annex in some use cases; might need to add more
+higher-level caching. It was a very minimal cache anyway, just of one file.
+
+Removed support for "in=" from preferred content expressions. That was
+problematic in two ways. First, it referred to a remote by name, but
+preferred content expressions can be evaluated elsewhere, where that remote
+doesn't exist, or a different remote has the same name. This name lookup
+code could error out at runtime. Secondly, "in=" seemed pretty useless, and
+indeed counterintuitive in preferred content expressions. "in=here" did not
+cause content to be gotten, but it did let present content be dropped.
+Other uses of "in=" are better handled by using groups.
+
+In place of "in=here", preferred content expressions can now use "present",
+which is useful if you want to disable automatic getting or dropping of
+content in some part of a repository. Had to document that "not present"
+is not a good thing to use -- it's unstable. Still, I find "present" handy
+enough to put up with that wart.
+
+Realized last night that the code I added to the TransferWatcher
+to check preferred content once a transfer is done is subject to a race;
+it will often run before the location log gets updated. Haven't found a good
+solution yet, but this is something I want working now, so I did put in a
+quick delay hack to avoid the race. Delays to avoid races are never a real
+solution, but sometimes you have to TODO it for later.
+
+----
+
+Been thinking about how to make the assistant notice changes to configuration
+in the git-annex branch that are merged in from elsewhere while it's running.
+I'd like to avoid re-reading unchanged configuration files after each merge
+of the branch.
+
+The most efficient way would be to reorganise the git-annex branch, moving
+config files into a configs directory, and logs into a logs directory. Then it
+could `git ls-tree git-annex configs` and check if the sha of the configs
+directory had changed, with git doing minimal work
+(benchmarked at 0.011 seconds).
+
+Less efficiently, keep the current git-annex branch layout, and
+use: `git ls-tree git-annex uuid.log remote.log preferred-content.log group.log trust.log`
+(benchmarked at 0.015 seconds)
+
+Leaning toward the less efficient option, with a rate limiter so it
+doesn't try more often than once every minute. Seems reasonable for it to
+take a minute for config changes to take effect on remote repos, even
+if the assistant syncs file changes to them more quickly.
diff --git a/doc/design/assistant/blog/day_111__config_monitor.mdwn b/doc/design/assistant/blog/day_111__config_monitor.mdwn
new file mode 100644
index 0000000..8addf54
--- /dev/null
+++ b/doc/design/assistant/blog/day_111__config_monitor.mdwn
@@ -0,0 +1,18 @@
+Added yet another thread, the ConfigMonitor. Since that thread needs to run
+code to reload cached config values from the git-annex branch when files
+there change, writing it also let me review where config files are cached,
+and I found that every single config file in the git-annex branch does
+get cached, with the exception of the uuid.log. So, added a cache for that,
+and now I'm more sanguine about yesterday's removal of the lower-level
+cache, because the only thing not being cached is location log information.
+
+The ConfigMonitor thread seems to work, though I have not tested it
+extensively.
The assistant should notice and apply config changes
+made locally, as well as any config changes pushed in from remotes.
+So, for example, if you add an S3 repo in the webapp, and are paired with
+another computer, that one's webapp will shortly include the new repo in
+its list. And all the preferred content, groups, etc settings will
+propagate over and be used as well.
+
+Well ... almost. Seems nothing causes git-annex branch changes to be
+pushed, until there's some file change to sync out.
diff --git a/doc/design/assistant/blog/day_113__notifier_work.mdwn b/doc/design/assistant/blog/day_113__notifier_work.mdwn
new file mode 100644
index 0000000..920b48d
--- /dev/null
+++ b/doc/design/assistant/blog/day_113__notifier_work.mdwn
@@ -0,0 +1,22 @@
+Built out the XMPP push notifier; around 200 lines of code.
+Haven't tested it yet, but it just might work. It's in the `xmpp` branch
+for now.
+
+I decided to send the UUID of the repo that was pushed to, otherwise
+peers would have to speculatively pull from every repo. A wrinkle in this
+is that not all git repos have a git-annex UUID. So it might notify that
+a push was sent to an unidentified repo, and then peers need to pull from
+every such repo. In the common case, there will only be one or a few such
+repos, at someplace like github that doesn't support git-annex. I could
+send the URL, but there's no guarantee different clients have the same
+URLs for a git remote, and also sending the URL leaks rather more data than
+does a random UUID.
+
+Had a bit of a scare where it looked like I couldn't use the haskell
+`network-protocol-xmpp` package together with the `mtl` package that
+git-annex already depends on. With help from #haskell I found the way
+to get them co-existing, by using the PackageImports extension. Whew!
+
+Need to add configuration of the XMPP server to use in the webapp, and
+perhaps also a way to create `.git/annex/creds/notify-xmpp` from the
+command line.
diff --git a/doc/design/assistant/blog/day_114__xmpp.mdwn b/doc/design/assistant/blog/day_114__xmpp.mdwn
new file mode 100644
index 0000000..617824d
--- /dev/null
+++ b/doc/design/assistant/blog/day_114__xmpp.mdwn
@@ -0,0 +1,56 @@
+Had to toss out my XMPP presence hack. Turns out that, at least in Google
+Talk, presence info is not sent to clients that have marked themselves
+unavailable, and that means the assistant would not see notifications, as it
+was nearly always marked unavailable as part of the hack.
+
+I tried writing a test program that uses XMPP personal eventing, only
+to find that Google Talk rejected my messages. I'm not 100% sure my
+messages were right, but I was directly copying the example in the RFC,
+and prosody accepted them. I could not seem to get a list of extensions out
+of Google Talk either, so I don't know if it doesn't support personal
+eventing, or perhaps only supports certain specific types of events.
+
+So, plan C... using XMPP [presence extended content](http://xmpp.org/rfcs/rfc6121.html#presence-extended).
+The assistant generates a presence message tagged "xa" (Extended Away),
+which hopefully will make it not seem present to clients.
+And to that presence message, I add my own XML element:
+
+
+
+This is all entirely legal, and not at all a hack.
+(Aside from this not really being presence info.) Isn't XML fun?
+
+And plan C works, with Google Talk, and prosody. I've successfully gotten
+push notifications flowing over XMPP!
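+
+Roughly, that stanza gets built like this. Sketch only, not the real code:
+it assumes the `network-protocol-xmpp` Presence record and the xml-types
+`Element` type, and the "push" attribute name is just for illustration.
+
+[[!format haskell """
+{-# LANGUAGE OverloadedStrings #-}
+import Network.Protocol.XMPP (XMPP, Presence(..), PresenceType(..), emptyPresence, putStanza)
+import Data.XML.Types
+import qualified Data.Text as T
+
+-- the extended content element, in a "git-annex" namespace
+pushNotification :: T.Text -> Element
+pushNotification uuid = Element
+	(Name "git-annex" (Just "git-annex") Nothing)
+	[(Name "push" Nothing Nothing, [ContentText uuid])]
+	[]
+
+-- an available presence, marked "xa" so chat clients ignore it,
+-- carrying the git-annex payload as extended content
+announcePush :: T.Text -> XMPP ()
+announcePush uuid = putStanza $ (emptyPresence PresenceAvailable)
+	{ presencePayloads =
+		[ Element "show" [] [NodeContent (ContentText "xa")]
+		, pushNotification uuid
+		]
+	}
+"""]]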
+
+----
+
+Spent some hours dealing with an unusual problem: git-annex started
+segfaulting intermittently on startup with the new XMPP code.
+
+Haskell code is not supposed to segfault..
+
+I think this was probably due to not using a bound thread for XMPP,
+so if haskell's runtime system rescheduled its green thread onto a different
+OS thread during startup, when it's setting up TLS, it'd make gnuTLS very
+unhappy.
+
+So, fixed it to use a bound thread. Will wait and see if the crash is gone.
+
+----
+
+Re-enabled DBUS support, using a new version of the library that avoids the
+memory leak. Will need further changes to the library to support
+reconnecting to dbus.
+
+----
+
+Next will be a webapp configuration UI for XMPP. Various parts of the
+webapp will direct the user to set up XMPP, when appropriate, especially
+when the user sets up a cloud remote.
+
+To make XMPP sufficiently easy to configure, I need to check SRV records to
+find the XMPP server, which is an unexpected PITA because `getaddrinfo`
+can't do that. There are several haskell DNS libraries that I could use for
+SRV, or I could use the `host` command:
+`host -t SRV _xmpp-client._tcp.gmail.com`
diff --git a/doc/design/assistant/blog/day_115__my_new_form.mdwn b/doc/design/assistant/blog/day_115__my_new_form.mdwn
new file mode 100644
index 0000000..d445650
--- /dev/null
+++ b/doc/design/assistant/blog/day_115__my_new_form.mdwn
@@ -0,0 +1,17 @@
+Built an SRV lookup library that can use either `host` or ADNS.
+
+Worked on DBUS reconnection some more; found an FD leak in the dbus library,
+and wrote its long-suffering author, John Millikin (also the XMPP library
+author, so I've been bothering him a lot lately), who once again came
+through with a quick fix.
+
+Built an XMPP configuration form, that tests the connection to the server.
+Getting the wording right on this was hard, and it's probably still not
+100% right.
+
+[[!img /assistant/xmpp.png]]
+
+Pairing over XMPP is something I'm still thinking about. It's
+contingent on tunneling git over XMPP (actually not too hard),
+and getting a really secure XMPP connection (needs library improvements,
+as the library currently accepts any SSL certificate).
diff --git a/doc/design/assistant/blog/day_116__the_segfault.mdwn b/doc/design/assistant/blog/day_116__the_segfault.mdwn
new file mode 100644
index 0000000..4763345
--- /dev/null
+++ b/doc/design/assistant/blog/day_116__the_segfault.mdwn
@@ -0,0 +1,25 @@
+Continuing to flail away at this XMPP segfault, which turned out not to be
+fixed by bound threads. I managed to make a fairly self-contained and small
+reproducible test case for it that does not depend on the network.
+Seems the bug is gonna be either in the Haskell binding for GNUTLS,
+or possibly in GNUTLS itself.
+
+Update: John was able to fix it using my testcase! It was a GNUTLS
+credentials object that went out of scope and got garbage collected.
+I think I was seeing the crash only with the threaded runtime because
+it has a separate garbage collection thread.
+
+----
+
+Arranged for the XMPP thread to restart when network connections
+change, as well as when the webapp configures it.
+
+Added an alert to nudge users to enable XMPP. It's displayed after adding a
+remote in the cloud.
+
+[[!img /assistant/xmppnudge.png]]
+
+----
+
+So, the first stage of XMPP is done. But so far all it does is push
+notification. Much more work to do here.
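+
+(As an aside on the garbage collection bug above: the general shape of that
+class of bug, and of the usual fix, can be sketched like this. Purely
+illustrative Haskell, not the actual gnutls binding code.)
+
+[[!format haskell """
+import Foreign.ForeignPtr
+
+-- The C-level session keeps using the credentials pointer, but if nothing
+-- on the Haskell side references the credentials object any more, its
+-- finalizer can run and free it behind the session's back. Holding a
+-- reference from the session wrapper keeps it reachable for as long as
+-- the session itself is.
+data Session = Session
+	{ sessionFP :: ForeignPtr ()
+	, sessionCreds :: ForeignPtr () -- retained only to prevent collection
+	}
+"""]]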
diff --git a/doc/design/assistant/blog/day_117__new_topologies.mdwn b/doc/design/assistant/blog/day_117__new_topologies.mdwn
new file mode 100644
index 0000000..f62da3c
--- /dev/null
+++ b/doc/design/assistant/blog/day_117__new_topologies.mdwn
@@ -0,0 +1,41 @@
+Back in [[day_85__more_foundation_work]], I wrote:
+
+> I suspect, but have not proven,
+> that the assistant is able to keep repos arranged in any shape of graph in
+> sync, as long as it's connected (of course) and each connection is
+> bi-directional. [And each node is running the assistant.]
+
+After today's work, many more graph topologies can be kept in sync -- the
+assistant now can keep repos in sync that are not directly connected, but
+must go through a central transfer point, which does not run the assistant
+at all. Major milestone!
+
+To get that working, as well as using XMPP push notifications, it turned
+out to need to be more aggressive about pushing out changed location log
+information. And, it seems, that was the last piece that was missing.
+Although I narrowly avoided going down a blind alley involving sending
+transfer notifications over XMPP. Luckily, I came to my senses.
+
+----
+
+This month's focus was the cloud, and the month is almost done. And now
+the assistant can, indeed, be used to sync over the cloud! I would have
+liked to have gotten on to implementing Amazon Glacier or Google Drive
+support, but at least the cloud fundamentals are there.
+
+Now that I have XMPP support, I'm tending toward going ahead and adding
+XMPP pairing, and git push over XMPP. This will open up lots of excellent
+use cases.
+
+So, how to tunnel git pushes over XMPP? Well, `GIT_SHELL` can be set to
+something that intercepts the output of `git-send-pack` and
+`git-receive-pack`, and that data can be tunneled through XMPP to connect
+them. Probably using XMPP ping.
+(XEP-0047: In-Band Bytestreams would be the right way ...
+but of course Google Talk doesn't support that extension.)
+
+XMPP requires ugly encoding that will bloat the data, but the data
+quantities are fairly small to sync up a few added or moved files
+(of course, we'll not be sending file contents over XMPP). Pairing with
+a large git repository over XMPP will need rather more bandwidth,
+of course.
diff --git a/doc/design/assistant/blog/day_118__monadic_discontinuity.mdwn b/doc/design/assistant/blog/day_118__monadic_discontinuity.mdwn
new file mode 100644
index 0000000..758b269
--- /dev/null
+++ b/doc/design/assistant/blog/day_118__monadic_discontinuity.mdwn
@@ -0,0 +1,15 @@
+Spent most of the past day moving the assistant into a monad of its own
+that encapsulates all the communications channels for its threads. This
+involved modifying nearly every line of code in the whole assistant.
+
+Typical change:
+
+[[!format haskell """
+handleConnection threadname st dstatus scanremotes pushnotifier = do
+	reconnectRemotes threadname st dstatus scanremotes (Just pushnotifier)
+		=<< networkRemotes st
+
+handleConnection = reconnectRemotes True =<< networkRemotes
+"""]]
+
+So, it's getting more readable..
diff --git a/doc/design/assistant/blog/day_119__time_for_testing.mdwn b/doc/design/assistant/blog/day_119__time_for_testing.mdwn
new file mode 100644
index 0000000..adf1d0d
--- /dev/null
+++ b/doc/design/assistant/blog/day_119__time_for_testing.mdwn
@@ -0,0 +1,12 @@
+Finished working the new assistant monad into all the assistant's code.
+I've changed 1870 lines of code in the past two days. It feels like more.
+While the total number of lines of code has gone up by around 100, the
+actual code size has gone *down*; the monad allowed dropping 3.4 kilobytes
+of manual variable threading complications. Or around 1% of a novel edited
+away, in other words.
+
+I don't seem to have broken anything, but I've started an extensive test
+of all of the assistant and webapp. So far, the bugs I've found were not
+introduced by my monadic changes. Fixed several bugs around adding
+removable drives, and a few other minor bugs. Plan to continue testing
+tomorrow.
diff --git a/doc/design/assistant/blog/day_120__test_day.mdwn b/doc/design/assistant/blog/day_120__test_day.mdwn
new file mode 100644
index 0000000..5482229
--- /dev/null
+++ b/doc/design/assistant/blog/day_120__test_day.mdwn
@@ -0,0 +1,2 @@
+Did a lot of testing, found and fixed 4 bugs with repository setup
+configurators. None of them were caused by the recent code reworking.
diff --git a/doc/design/assistant/blog/day_121__buddy_list.mdwn b/doc/design/assistant/blog/day_121__buddy_list.mdwn
new file mode 100644
index 0000000..48ea1ff
--- /dev/null
+++ b/doc/design/assistant/blog/day_121__buddy_list.mdwn
@@ -0,0 +1,10 @@
+Got the XMPP client maintaining a list of buddies, including tracking which
+clients are present and away, and which clients are recognised as other
+git-annex assistant clients. This was fun, it is almost all pure
+functional code, which always makes me happy.
+
+Started building UI for XMPP pairing. So far, I have it showing a list of
+buddies who are also running git-annex (or not). The list even refreshes
+in real time as new buddies come online.
+
+[[!img /assistant/buddylist.png]]
diff --git a/doc/design/assistant/blog/day_122__xmpp_pairing.mdwn b/doc/design/assistant/blog/day_122__xmpp_pairing.mdwn
new file mode 100644
index 0000000..8683827
--- /dev/null
+++ b/doc/design/assistant/blog/day_122__xmpp_pairing.mdwn
@@ -0,0 +1,29 @@
+Reworked my XMPP code, which was still specific to push notification, into
+a more generic XMPP client, that's based on a very generic NetMessager
+class, that the rest of the assistant can access without knowing anything
+about XMPP.
+
+Got pair requests flowing via XMPP ping, over Google Talk! And when the
+webapp receives a pair request, it'll pop up an alert to respond. The rest
+of XMPP pairing should be easy to fill in from here.
+
+To finish XMPP pairing, I'll need git pull over XMPP, which is nontrivial,
+but I think I know basically how to do it. And I'll need some way to represent
+an XMPP buddy as a git remote, which is all that XMPP pairing will really
+set up.
+
+It could be a git remote using an `xmpp:user@host` URI for the git url, but
+that would confuse regular git to no end (it'd think it was an ssh host),
+and probably need lots of special casing in the parts of git-annex that
+handle git urls too. Or it could be a git remote without a url set, and
+use another config field to represent the XMPP data. But then git wouldn't
+think it was a remote at all, which would prevent using "git pull
+xmppremote" at all, which I'd like to be able to use when implementing git
+pull over XMPP.
+
+Aha! The trick seems to be to leave the url unset in git config,
+but temporarily set it when pulling:
+
+    GIT_SSH=git-annex git -c remote.xmppremote.url=xmpp:client pull xmppremote
+
+Runs git-annex with "xmpp git-upload-pack 'client'".. Just what I need.
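+
+Here's a sketch of what that looks like from the receiving end (illustrative
+only, not the real command): git runs `$GIT_SSH <host> <command>`, so with
+the fake url the "host" is simply "xmpp" and the command carries the peer
+name.
+
+[[!format haskell """
+import System.Environment (getArgs)
+
+main :: IO ()
+main = do
+	args <- getArgs
+	case args of
+		-- e.g. ["xmpp", "git-upload-pack 'client'"]
+		["xmpp", command] -> tunnelOverXMPP command
+		_ -> error "not an xmpp pseudo-url"
+
+-- stand-in for handing the pack command off to be run over XMPP
+tunnelOverXMPP :: String -> IO ()
+tunnelOverXMPP command = putStrLn ("would tunnel over XMPP: " ++ command)
+"""]]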
diff --git a/doc/design/assistant/blog/day_123__xmpp_insanity.mdwn b/doc/design/assistant/blog/day_123__xmpp_insanity.mdwn new file mode 100644 index 0000000..bfbc306 --- /dev/null +++ b/doc/design/assistant/blog/day_123__xmpp_insanity.mdwn @@ -0,0 +1,49 @@ +Spent about 5 hours the other night in XMPP hell. At every turn Google Talk +exhibited behavior that may meet the letter of the XMPP spec (or not), but +varies between highly annoying and insane. + +By "insane", I mean this: If a presence message is directed from one client +to another client belonging to that same user, randomly leaking that message +out to other users who are subscribed is just a security hole waiting to +happen. + +Anyway, I came out of that with a collection of hacks that worked, but I +didn't like. I was using directed presence for buddy-to-buddy pairing, and +an IQ message hack for client-to-client pairing. + +Today I got chat messages working instead, for both sorts of pairing. These +chat messages have an empty body, which *should* prevent clients from +displaying them, but they're sent directed to only git-annex clients +anyway. + +---- + +And XMPP pairing 100% works now! Of course, it doesn't know how to git pull +over XMPP yet, but everything else works. + +Here's a real `.git/config` generated by the assistant after XMPP pairing. + + [remote "joey"] + url = + fetch = +refs/heads/*:refs/remotes/joey/* + annex-uuid = 14f5e93e-1ed0-11e2-aa1c-f7a45e662d39 + annex-xmppaddress = joey@kitenet.net + +---- + +Fixed a typo that led to an infinite loop when adding a ssh git repo with the +assistant. Only occurred when an absolute directory was specified, which +is why I didn't notice it before. + +---- + +Security fix: Added a `GIT_ANNEX_SHELL_DIRECTORY` environment variable that +locks down git-annex-shell to operating in only a single directory. The +assistant sets that in ssh `authorized_keys` lines it creates. This +prevents someone you pair with from being able to access any other git or +git-annex repositories you may have. + +---- + +Next up, more craziness. But tomorrow is Nov 6th, so you in the US already +knew that.. diff --git a/doc/design/assistant/blog/day_124__git_push_over_xmpp_groundwork.mdwn b/doc/design/assistant/blog/day_124__git_push_over_xmpp_groundwork.mdwn new file mode 100644 index 0000000..ecb6023 --- /dev/null +++ b/doc/design/assistant/blog/day_124__git_push_over_xmpp_groundwork.mdwn @@ -0,0 +1,28 @@ +Laying the groundwork for git push over XMPP. BTW, the motivation for doing +this now is that if the assistant can push git data peer-to-peer, users +who are entirely using the cloud don't need to set up a git repo in the +cloud somewhere. Instead, a single special remote in the cloud will be all +they need. So this is a keystone in the assistant's cloud support. + +I'm building the following pipeline: + + git push <--> git-annex xmppgit <--> xmppPush <-------> xmpp + | + git receive-pack <--> xmppReceivePack <---------------> xmpp + +A tricky part of this is `git-annex xmppgit`, which is run by `git push` +rather than the usual `ssh`. Rather than speak XMPP itself, that feeds the +data through the assistant daemon, using some special FDs that are set +up by the assistant when it runs `git push`, and communicated via +environment variables. I hoped to set up a pipe and not need it to do any +work on its own, but short of using the linux-specific `splice(2)`, that +doesn't seem possible. It also will receive the exit status of +`git receive-pack` and propigate it to `git push`. 
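+
+The relay idea amounts to something like the following sketch (illustrative
+only; the environment variable names here are made up, and the real command
+also has to relay the exit status):
+
+[[!format haskell """
+import System.Environment (getEnv)
+import System.IO
+import System.Posix.IO (fdToHandle)
+import Control.Concurrent (forkIO)
+import Control.Concurrent.MVar
+import qualified Data.ByteString as B
+
+main :: IO ()
+main = do
+	-- FDs inherited from the assistant, their numbers passed in the environment
+	toassistant <- handleFromEnv "XMPPGIT_OUT_FD"
+	fromassistant <- handleFromEnv "XMPPGIT_IN_FD"
+	done <- newEmptyMVar
+	-- everything git push writes to us goes to the assistant ...
+	_ <- forkIO $ copy stdin toassistant >> putMVar done ()
+	-- ... and everything the assistant relays back goes to git push
+	copy fromassistant stdout
+	takeMVar done
+
+-- look up an inherited file descriptor number and make a Handle of it
+handleFromEnv :: String -> IO Handle
+handleFromEnv var = do
+	n <- getEnv var
+	fdToHandle (fromIntegral (read n :: Int))
+
+-- shuffle bytes from one handle to the other until EOF
+copy :: Handle -> Handle -> IO ()
+copy from to = do
+	b <- B.hGetSome from 4096
+	if B.null b
+		then hClose to
+		else B.hPut to b >> hFlush to >> copy from to
+"""]]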
+ +Also built the IO sides of `xmppPush` and `xmppReceivePack` although these +are not tested. The XMPP sides of them come next. + +---- + +Stuffing lots of git-annex branded USB keys into envelopes tonight, while +watching the election coverage. diff --git a/doc/design/assistant/blog/day_125__xmpp_push_continues.mdwn b/doc/design/assistant/blog/day_125__xmpp_push_continues.mdwn new file mode 100644 index 0000000..010ab14 --- /dev/null +++ b/doc/design/assistant/blog/day_125__xmpp_push_continues.mdwn @@ -0,0 +1,15 @@ +I've finished building the XMMP side of git push over XMPP. Now I only +have to add code to trigger these pushes. And of course, fix all the bugs, +since none of this has been tested at all. + +Had to deal with some complications, like handling multiple clients that +all want to push at the same time. Only one push is handled at a time; +messages for others are queued. Another complication I don't deal with yet +is what to do if a client stops responding in the middle of a push. It +currently will wait forever for a message from the client; instead it +should time out. + +---- + +Jimmy got the OSX builder working again, despite my best attempts to add +dependencies and break it. diff --git a/doc/design/assistant/blog/day_126__mr_watson_come_here.mdwn b/doc/design/assistant/blog/day_126__mr_watson_come_here.mdwn new file mode 100644 index 0000000..78d263b --- /dev/null +++ b/doc/design/assistant/blog/day_126__mr_watson_come_here.mdwn @@ -0,0 +1,52 @@ +I'm stunned and stoked to have gotten git push over XMPP working today. +And am nearly out of steam, it was a wild ride.. + + To xmpp::joey@kitenet.net + * [new branch] master -> refs/xmpp/newmaster + +The surprising part is how close my initial implementation came to just +working on the first try. It had around 3 bugs, which took hours of staring +at debugging output to find: + +1. The git push action was run in the same thread as the XMPP + client, which prevented the client from continuing to run and relaying + messages. +2. The git-receive-pack side waited on the wrong thread, so didn't + notice when the program was done. +3. I accidentially used the wrong attribute name when sending a ReceivePackDone + message. + +But all in all, it just worked. + +Here's a sample of the actual data sent when one file is added to the +repository (also includes the corresponding update to the git-annex branch): + + MDA4NjhhMmNmOGZjMWE3MTlkOGVjOWVmOWZiMGZiNjVlODc2NjQ1NDAyMTAgODIwNTZjMDM4 + ZjU2YzE1ODdjYzllOWRhNzQzMzU0YjE4NzNjZWJlOSByZWZzL3htcHAvbmV3bWFzdGVyACBy + ZXBvcnQtc3RhdHVzIHNpZGUtYmFuZC02NGswMDAw + + UEFDSwAAAAIAAAADnAx4nJXLTQ4CIQxA4T2n4AKaAqVAYoxL4y2gU+Jo5iczdeHtnSu4eMm3 + ebqJ2NwgSCLmNkTBlKFCYwwhoHOtQ+scqZCwWesms9pcPffc2dXkypCFi/TSG/RGUXIiwojg + HZj60eey2cciX3uXfbeX18Hbe1SZRc9HV+tC9FgyJW9PgACGl2kaVeXfz/wArHQ81qMGeJwz + NDIAAoVUI4ZZB9RW1E8NtXp5t77/fn3hw41cl2MNIbIZqTk5+Qwerw+aJX2INjsffYndtdCz + 5mZWLDdUQV5qeVpmDtCQnx/3/6s40+Q4P/7O+Y4ShS+1Ad83AwC6CirftAt4nK3MsRGDMAwF + 0IkcSVgSdpkidzRUmcDWBy4pSAEFl+mzRN4A77a9Tmr7vlz06e8lzoPmmb5Mz+k+mD/SkTkl + eFHPq9eqQ+nSzFsWaDFnFmCMCEOvHgLrCrQxS7AWdvUVhv9uPwHxMbfumlvWdco1RLL4wSQF + g0uFFOKu3Q== + +Git said this push took 385 bytes; after base64 encoding to transport it over +XMPP as shown above, it needs 701 bytes, and the XMPP envelope and encryption +adds more overhead (although the XMPP connection may also be compressed?) + +Not the most efficient git transport, but still a practical one! + +---- + +Big thanks by the way to meep, who posted a comment reminding me about +`git-remote-helpers`. 
This was the right thing to use for XMPP over git, +it lets the git remote be configured with `url = xmpp::user@host`. + +---- + +Next, I need to get the assistant to use this for syncing. Currently, it only +pushes a test branch. diff --git a/doc/design/assistant/blog/day_127__xmpp_syncs.mdwn b/doc/design/assistant/blog/day_127__xmpp_syncs.mdwn new file mode 100644 index 0000000..7f95f87 --- /dev/null +++ b/doc/design/assistant/blog/day_127__xmpp_syncs.mdwn @@ -0,0 +1,35 @@ +I got full-on git-annex assistant syncing going over XMPP today! + +How well does it work? Well, I'm at the cabin behind a dialup modem. I have +two repos that can only communicate over XMPP. One uses my own XMPP server, +and the other uses a Google Talk account. I make a file in one repo, and +switch windows to the other, and type `ls`, and the file (not its content +tho..) has often already shown up. So, it's about as fast as syncing over +ssh, although YMMV. + +---- + +Refactored the git push over XMPP code rather severely. It's quite a +lot cleaner now. + +---- + +Set XMPP presence priority to a negative value, which will hopefully +prevent git-annex clients that share a XMPP account with other clients from +intercepting chat messages. Had to change my XMPP protocol some to deal +with this. + +---- + +Some webapp UI work. When showing the buddy list, indicate which buddies +are already paired with. + +After XMPP pairing, it now encourages setting up a shared cloud repository. + +[[!img /assistant/xmpppairingend.png]] + +I still need to do more with the UI after XMPP pairing, to help the paired +users configure a shared cloud transfer remote. Perhaps the thing to do is +for the ConfigMonitor to notice when a git push adds a new remote, +and pop up an alert suggesting the user enable it. Then one user +can create the repository, and the other one enable it. diff --git a/doc/design/assistant/blog/day_128__last_xmpp_day.mdwn b/doc/design/assistant/blog/day_128__last_xmpp_day.mdwn new file mode 100644 index 0000000..2c7d70a --- /dev/null +++ b/doc/design/assistant/blog/day_128__last_xmpp_day.mdwn @@ -0,0 +1,49 @@ +I hope I'm nearly at the end of this XMPP stuff after today. Planning a new +release tomorrow. + +---- + +Split up the local pairing and XMPP pairing UIs, and wrote a +[[/assistant/share_with_a_friend_walkthrough]]. + +---- + +Got the XMPP push code to time out if expected data doesn't arrive within +2 minutes, rather than potentially blocking other XMPP push forever if +the other end went away. + +I pulled in the Haskell +[async](http://hackage.haskell.org/package/async) library for this, +which is yes, yet another library, but one that's now in the haskell platform. +It's worth it, because of how nicely it let me implement IO actions that +time out. + +[[!format haskell """ +runTimeout :: Seconds -> IO a -> IO (Either SomeException a) +runTimeout secs a = do + runner <- async a + controller <- async $ do + threadDelaySeconds secs + cancel runner + cancel controller `after` waitCatch runner +"""]] + +This would have been 20-50 lines of gnarly code without async, and I'm sure +I'll find more uses for async in the future. + +---- + +Discovered that the XMPP push code could deadlock, if both clients started +a push to the other at the same time. I decided to fix this by allowing +each client to run both one push and one receive-pack over XMPP at the same +time. + +---- + +Prevented the transfer scanner from trying to queue transfers to XMPP remotes. 
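+
+(As an aside on the deadlock fix above: the "one push and one receive-pack
+at a time" idea can be modelled with a slot per direction, where a second
+request for a busy slot simply waits its turn. A sketch, not the actual
+code:)
+
+[[!format haskell """
+import Control.Concurrent.STM
+import Control.Exception (bracket_)
+
+newtype Slot = Slot (TMVar ())
+
+newSlot :: IO Slot
+newSlot = fmap Slot (newTMVarIO ())
+
+-- Run an action once the slot is free; the slot is released even if the
+-- action throws, so a failed push cannot wedge the queue.
+withSlot :: Slot -> IO a -> IO a
+withSlot (Slot v) = bracket_
+	(atomically $ takeTMVar v)
+	(atomically $ putTMVar v ())
+"""]]
+
+With one slot for pushes and another for receive-packs, both sides of a
+simultaneous push can proceed without waiting on each other.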
+ +---- + +Made XMPP pair requests that come from the same account we've already +paired with be automatically accepted. So once you pair with one device, +you can easily add more. diff --git a/doc/design/assistant/cloud.mdwn b/doc/design/assistant/cloud.mdwn index 653ebb4..9bb8696 100644 --- a/doc/design/assistant/cloud.mdwn +++ b/doc/design/assistant/cloud.mdwn @@ -17,7 +17,9 @@ More should be added, such as: * [nimbus.io](https://nimbus.io/) Fairly low prices ($0.06/GB); REST API; free software -## The cloud notification problem +See poll at [[polls/prioritizing_special_remotes]]. + +## The cloud notification problem **done** Alice and Bob have repos, and there is a cloud remote they both share. Alice adds a file; the assistant transfers it to the cloud remote. @@ -28,7 +30,7 @@ been a change to Alice's git repo. Then he needs to pull from Alice's git repo, or some other repo in the cloud she pushed to. Once both steps are done, the assistant will transfer the file from the cloud to Bob. -* dvcs-autosync uses jabber; all repos need to have the same jabber account +* dvcs-autosync uses xmppp; all repos need to have the same xmpp account configured, and send self-messages. An alternative would be to have different accounts that join a channel or message each other. Still needs account configuration. @@ -42,7 +44,9 @@ the assistant will transfer the file from the cloud to Bob. * pubsubhubbub does not seem like an option; its hubs want to pull down a feed over http. -## storing git repos in the cloud +See [[xmpp]] for design of git-annex's use of xmpp for push notifications. + +## storing git repos in the cloud **done for XMPP** Of course, one option is to just use github etc to store the git repo. @@ -52,4 +56,4 @@ Two things can store git repos in Amazon S3: Another option is to not store the git repo in the cloud, but push/pull peer-to-peer. When peers cannot directly talk to one-another, this could be -bounced through something like XMPP. +bounced through something like XMPP. This is **done** for [[xmpp]]! diff --git a/doc/design/assistant/pairing.mdwn b/doc/design/assistant/pairing.mdwn index d09c644..30b42a2 100644 --- a/doc/design/assistant/pairing.mdwn +++ b/doc/design/assistant/pairing.mdwn @@ -81,3 +81,4 @@ is escaped before going to the browser. It should be possible for third parties to tell when pairing is done, but it's actually rather hard since they don't necessarily share the secret. +* Pairing over XMPP. diff --git a/doc/design/assistant/polls/prioritizing_special_remotes.mdwn b/doc/design/assistant/polls/prioritizing_special_remotes.mdwn index b576c9e..2f7f4bc 100644 --- a/doc/design/assistant/polls/prioritizing_special_remotes.mdwn +++ b/doc/design/assistant/polls/prioritizing_special_remotes.mdwn @@ -6,7 +6,7 @@ locally paired systems, and remote servers with rsync. Help me prioritize my work: What special remote would you most like to use with the git-annex assistant? -[[!poll open=yes 15 "Amazon S3 (done)" 9 "Amazon Glacier" 7 "Box.com" 57 "My phone (or MP3 player)" 15 "Tahoe-LAFS" 5 "OpenStack SWIFT" 17 "Google Drive"]] +[[!poll open=yes 15 "Amazon S3 (done)" 11 "Amazon Glacier" 8 "Box.com" 61 "My phone (or MP3 player)" 15 "Tahoe-LAFS" 5 "OpenStack SWIFT" 23 "Google Drive"]] This poll is ordered with the options I consider easiest to build listed first. 
Mostly because git-annex already supports them and they diff --git a/doc/design/assistant/todo.mdwn b/doc/design/assistant/todo.mdwn new file mode 100644 index 0000000..d9aaef6 --- /dev/null +++ b/doc/design/assistant/todo.mdwn @@ -0,0 +1,4 @@ +This is a subset of [[/todo]] for items tagged for the assistant. +Link items to [[todo/done]] when done. + +[[!inline pages="tagged(design/assistant)" show=0 archive=yes]] diff --git a/doc/design/assistant/transfer_control.mdwn b/doc/design/assistant/transfer_control.mdwn index 6348ab2..ad5578c 100644 --- a/doc/design/assistant/transfer_control.mdwn +++ b/doc/design/assistant/transfer_control.mdwn @@ -10,41 +10,40 @@ something smart with such remotes. ## TODO -* preferred content settings made in the webapp (or in vicfg, or synced over) are not noticed. - -### dropping no longer preferred content TODO +* The expensive scan currently makes one pass, dropping content at the same + time more uploads and downloads are queued. It would be better to drop as + much content as possible upfront, to keep the total annex size as small + as possible. How to do that without making two expensive scans? +* The TransferWatcher's finishedTransfer function relies on the location + log having been updated after a transfer. But there's a race; if the + log is not updated in time, it will fail to drop unwanted content. + (There's a 10 second sleep there now to avoid the race, but that's hardly + a fix.) + +### dropping no longer preferred content When a file is renamed, it might stop being preferred, so could be checked and dropped. (If there's multiple links to the same content, this gets tricky. Let's assume there are not.) -* When a file is sent or received, the sender's preferred content - settings may change, causing it to be dropped from the sender. -* May also want to check for things to drop, from both local and remotes, - when doing the expensive trasfer scan. - ### analysis of changes that can result in content no longer being preferred 1. The preferred content expression can change, or a new repo is added, or groups change. Generally, some change to global annex state. Only way to deal with this is an expensive scan. (The rest of the items below come from - analizing the terminals used in preferred content expressions.) -2. renaming of a file (ie, moved to `archive/`) -3. some other repository gets the file (`in`, `copies`) -4. some other repository drops the file (`in`, `copies` .. However, it's + analizing the terminals used in preferred content expressions.) **done** +2. renaming of a file (ie, moved to `archive/`) **done** + (note also that renaming a file can also make it become preferred content + again, and should cause it to be transferred in that case) **done** +3. we get a file (`in`, `copies`) **done** +4. we sent a file (`in`, `copies`) **done** +5. some other repository drops the file (`in`, `copies` .. However, it's unlikely that an expression would prefer content when *more* copies exisited, and want to drop it when less do. That's nearly a pathological case.) -5. `migrate` is used to change a backend (`inbackend`; unlikely) - -That's all! Of these, 2 and 3 are by far the most important. - -Rename handling should certianly check 2. +6. `migrate` is used to change a backend (`inbackend`; unlikely) -One place to check for 3 is after transferring a file; but that does not -cover all its cases, as some other repo could transfer the file. 
To fully -handle 3, need to either use a full scan, or examine location log history -when receiving a git-annex branch push. +That's all! Of these, 1-4 are by far the most important. ## specifying what data a remote prefers to contain **done** diff --git a/doc/design/assistant/xmpp.mdwn b/doc/design/assistant/xmpp.mdwn new file mode 100644 index 0000000..e31a149 --- /dev/null +++ b/doc/design/assistant/xmpp.mdwn @@ -0,0 +1,127 @@ +The git-annex assistant uses XMPP to communicate between peers that +cannot directly talk to one-another. A typical scenario is two users +who share a repository, that is stored in the [[cloud]]. + +### TODO + +* Prevent idle disconnection. Probably means sending or receiving pings, + but would prefer to avoid eg pinging every 60 seconds as some clients do. +* Do git-annex clients sharing an account with regular clients cause confusing + things to happen? + See +* Assistant.Sync.manualPull doesn't handle XMPP remotes yet. + This is needed to handle getting back in sync after reconnection. +* When pairing, sometimes both sides start to push, and the other side + sends a PushRequest, and the two deadlock, neither doing anything. + (Timeout eventually breaks this.) + Maybe should allow one push and one receive-pack at a time? + +## design goals + +1. Avoid user-visible messages. dvcs-autosync uses XMPP similarly, but + sends user-visible messages. Avoiding user-visible messages lets + the user configure git-annex to use his existing XMPP account + (eg, Google Talk). + +2. Send notifications to buddies. dvcs-autosync sends only self-messages, + but that requires every node have the same XMPP account configured. + git-annex should support that mode, but it should also send notifications + to a user's buddies. (This will also allow for using XMPP for pairing + in the future.) + +3. Don't make account appear active. Just because git-annex is being an XMPP + client, it doesn't mean that it wants to get chat messages, or make the + user appear active when he's not using his chat program. + +## protocol + +To avoid relying on XMPP extensions, git-annex communicates +using presence messages, and chat messages (with empty body tags, +so clients don't display them). + +git-annex sets a negative presence priority, to avoid any regular messages +getting eaten by its clients. It also sets itself extended away. +Note that this means that chat messages always have to be directed at +specific git-annex clients. + +To the presence and chat messages, it adds its own tag as +[extended content](http://xmpp.org/rfcs/rfc6121.html#presence-extended). +The xml namespace is "git-annex" (not an URL because I hate wasting bandwidth). + +To indicate it's pushed changes to a git repo with a given UUID, a message +that is sent to all buddies and other clients using the account (no +explicit pairing needed), it uses a broadcast presence message containing: + + + +Multiple UUIDs can be listed when multiple clients were pushed. If the +git repo does not have a git-annex UUID, an empty string is used. + +To query if other git-annex clients are around, a presence message is used, +containing: + + + +For pairing, a chat message is sent to every known git-annex client, +containing: + + + +### git push over XMPP + +To indicate that we could push over XMPP, a chat message is sent, +to each known client of each XMPP remote. + + + +To request that a remote push to us, a chat message can be sent. 
+ + + +When replying to an xmpppush message, this is directed at the specific +client that indicated it could push. To solicit pushes from all clients, +the message has to be sent directed indiviaually to each client. + +When a peer is ready to send a git push, it sends: + + + +The receiver runs `git receive-pack`, and sends back its output in +one or more chat messages, directed to the client that is pushing: + + + 007b27ca394d26a05d9b6beefa1b07da456caa2157d7 refs/heads/git-annex report-status delete-refs side-band-64k quiet ofs-delta + + +The sender replies with the data from `git push`, in +one or more chat messages, directed to the receiver: + + + data + + +When `git receive-pack` edits, the receiver indicates its exit +status with a chat message, directed at the sender: + + + +### security + +Data git-annex sends over XMPP will be visible to the XMPP +account's buddies, to the XMPP server, and quite likely to other interested +parties. So it's important to consider the security exposure of using it. + +Even if git-annex sends only a single bit notification, this lets attackers +know when the user is active and changing files. Although the assistant's other +syncing activities can somewhat mask this. + +As soon as git-annex does anything unlike any other client, an attacker can +see how many clients are connected for a user, and fingerprint the ones +running git-annex, and determine how many clients are running git-annex. + +If git-annex sent the UUID of the remote it pushed to, this would let +attackers determine how many different remotes are being used, +and map some of the connections between clients and remotes. + +An attacker could replay push notification messages, reusing UUIDs it's +observed. This would make clients pull repeatedly, perhaps as a DOS. diff --git a/doc/download.mdwn b/doc/download.mdwn index 4de91c9..8c6f5b5 100644 --- a/doc/download.mdwn +++ b/doc/download.mdwn @@ -33,3 +33,8 @@ The git repository has some branches: * `setup` contains configuration for this website * `pristine-tar` contains [pristine-tar](http://kitenet.net/~joey/code/pristine-tar) data to create tarballs of any past git-annex release. + +---- + +Developing git-annex? Patches are very welcome. +You should read [[coding_style]]. diff --git a/doc/forum/Does_git-annex_version_big_files__63__.mdwn b/doc/forum/Does_git-annex_version_big_files__63__.mdwn new file mode 100644 index 0000000..5cbe7d5 --- /dev/null +++ b/doc/forum/Does_git-annex_version_big_files__63__.mdwn @@ -0,0 +1,5 @@ +Hi + +I am trying to understand how git-annex works. Does it version big files at all? + +thanks diff --git a/doc/forum/Don__39__t_understand_local_vs._known_keys.mdwn b/doc/forum/Don__39__t_understand_local_vs._known_keys.mdwn new file mode 100644 index 0000000..39f8c10 --- /dev/null +++ b/doc/forum/Don__39__t_understand_local_vs._known_keys.mdwn @@ -0,0 +1,19 @@ +I just created a new Annex by doing the following: + + 1. git init + 2. git annex init + 3. git annex add . + 4. git commit -m "Added files" + 5. git annex status + +I see the following: + + local annex keys: 224 + local annex size: 41 gigabytes + known annex keys: 235 + known annex size: 49 gigabytes + bloom filter size: 16 mebibytes (0% full) + backend usage: + SHA256: 459 + +Why is there an 8 gigabyte difference here? What/where are those files? What is a bloom filter? 
diff --git a/doc/forum/Managing_multiple_annexes_with_assistant__63__.mdwn b/doc/forum/Managing_multiple_annexes_with_assistant__63__.mdwn new file mode 100644 index 0000000..30e0597 --- /dev/null +++ b/doc/forum/Managing_multiple_annexes_with_assistant__63__.mdwn @@ -0,0 +1,13 @@ +Is it possible to run more than one instance of the assistant in an account? + +A particular example is that I might have two annexes: + +1. for my music, and +2. for some personal documents. + +I would like to have my music annex paired with my work laptop and have all the automagical power of git-annex-assistant working for me. However I don't want to put my personal documents on a work machine, hence the second annex. + +(I think) An ideal world would have the assistant managing both annexes. Is this possible? + + +I know my question is similar to [one annex versus many annexes?](http://git-annex.branchable.com/forum/one_annex_versus_many_annexes__63__/) but I think it is different enough to warrant a new thread... diff --git a/doc/forum/New_git-annex_integration_mode_for_Emacs_users.mdwn b/doc/forum/New_git-annex_integration_mode_for_Emacs_users.mdwn new file mode 100644 index 0000000..0513e0d --- /dev/null +++ b/doc/forum/New_git-annex_integration_mode_for_Emacs_users.mdwn @@ -0,0 +1,3 @@ +I've started a another project to provide Emacs integration for git-annex: [[https://github.com/jwiegley/git-annex-el]] + +My problem with the existing mode is that it didn't feel at all Emacsy, while my mode just piggy backs on existing paradigms, like using `C-x C-q` in a locked file to make it editable, and allowing you to browse and lock/unlock annexed files from Dired. diff --git a/doc/forum/Removing_files_not_found_by_git_annex_unused.mdwn b/doc/forum/Removing_files_not_found_by_git_annex_unused.mdwn new file mode 100644 index 0000000..ce0aedc --- /dev/null +++ b/doc/forum/Removing_files_not_found_by_git_annex_unused.mdwn @@ -0,0 +1,29 @@ +Hi, + +I've removed some large files with git remove, but seem to be unable to remove the corresponding annex content. + +Example: + +kheymann@corax:~/annex$ find -name "*s24576--10daa3d9007edad720dc057e4272a9c6cda930bef34a83e3bc1d93f1c55b9cac" + +./.git/annex/objects/jF/j7/SHA256-s24576--10daa3d9007edad720dc057e4272a9c6cda930bef34a83e3bc1d93f1c55b9cac + +./.git/annex/objects/jF/j7/SHA256-s24576--10daa3d9007edad720dc057e4272a9c6cda930bef34a83e3bc1d93f1c55b9cac/SHA256-s24576--10daa3d9007edad720dc057e4272a9c6cda930bef34a83e3bc1d93f1c55b9cac + +kheymann@corax:~/annex$ git annex dropkey -vvv --backend SHA256 s24576--10daa3d9007edad720dc057e4272a9c6cda930bef34a83e3bc1d93f1c55b9cac + +No output but the files remain in the annex. Git annex fsck and git annex unused run without listing files to be removes. What can I do apart from deleting the files manually from the annex? + +Some info: + + kheymann@corax:~/annex$ git annex version + git-annex version: 3.20121017 + local repository version: 3 + default repository version: 3 + supported repository versions: 3 + upgrade supported from repository versions: 0 1 2 + +Any hints? + +Best, +Karsten diff --git a/doc/forum/Restricting_git-annex-shell_to_a_specific_repository.mdwn b/doc/forum/Restricting_git-annex-shell_to_a_specific_repository.mdwn new file mode 100644 index 0000000..bed019e --- /dev/null +++ b/doc/forum/Restricting_git-annex-shell_to_a_specific_repository.mdwn @@ -0,0 +1,25 @@ +Is there a way to restrict git-annex-shell to a specific directory? 
+Currently, if git-annex is paired to a remote repository, it adds this to the authorized_keys: + + + $ cat ~/.ssh/authorized_keys + command="~/.ssh/git-annex-shell",no-agent-forwarding,no-port-forwarding,no-X11-forwarding ssh-rsa AAAAB3... + + $ cat ~/.ssh/git-annex-shell + #!/bin/sh + set -e + exec git-annex-shell -c "$SSH_ORIGINAL_COMMAND" + +That gives whoever has the pubkey the right to access all repositories of one user. +It would be nice to have a manual way to limit the access to a specific repository like + + + $ cat ~/.ssh/git-annex-shell + #!/bin/sh + set -e + export GIT_ANNEX_SHELL_REPO=~/annex + exec git-annex-shell -c "$SSH_ORIGINAL_COMMAND" + + +Or maybe some chroot hackery is the way to go? + diff --git a/doc/forum/Simple_check_out_with_assistant__63__.mdwn b/doc/forum/Simple_check_out_with_assistant__63__.mdwn new file mode 100644 index 0000000..755b237 --- /dev/null +++ b/doc/forum/Simple_check_out_with_assistant__63__.mdwn @@ -0,0 +1,2 @@ +I want to use the annex assistant instead of the command line. Which is the recommended method to check out a file that I want to edit? + diff --git a/doc/forum/Syncing_machines_on_different_networks.mdwn b/doc/forum/Syncing_machines_on_different_networks.mdwn new file mode 100644 index 0000000..6851a6c --- /dev/null +++ b/doc/forum/Syncing_machines_on_different_networks.mdwn @@ -0,0 +1,9 @@ +I've been using git-annex locally for a couple months. So far I've only used it to keep track of files on my laptop and local usb hard drives. Now I would like to add a network into the picture, and hopefully start to move away from Dropbox. + +I have Dropbox on two computers: my home machine and my work machine. The home machine is only on when I'm at home and the work machine is only on when I'm at work, so the computers are never on at the same time and thus can never communicate directly. What are my options for keeping annexes on these two machines in sync? + +Initially I was hoping that I could use an S3 special remote for this, but I see that special remotes only hold the actual file data, not any of the git stuff. So I can't push my changes to S3 at work and then pull those changes in at home. + +From what I can tell from the documentation, the only way I can handle this problem is to have an annex sitting on a VPS or someplace that both my home and work machines can talk to. Is that correct? That would be ok, but if I'm going to put my annex out there in the cloud somewhere, I want the files to be encrypted. It looks like git-annex only supports encryption of file data with special remotes, not a full annex. Is there no way to have some sort of encrypted git-annex hub? + +I backed the assistant and have been following the development blog, but I haven't tried it out yet. Am I correct in thinking that nothing in the assistant will address this particular issue? diff --git a/doc/forum/Truly_purging_dead_repositories.mdwn b/doc/forum/Truly_purging_dead_repositories.mdwn new file mode 100644 index 0000000..8a23143 --- /dev/null +++ b/doc/forum/Truly_purging_dead_repositories.mdwn @@ -0,0 +1 @@ +Since I'm just starting out with git-annex, I've had several false starts in getting things setup nicely between three machines. In the course of so doing, I've ended up with several repositories which no longer exist, and which I've marked dead. However, this is making the "git annex status" report a bit ugly, since these repositories no longer exist and can never exist again. Is there a way to truly purge dead repositories from my annex? 
I'd be fine with a command that must be ran in tandem on all annexes before doing a sync... diff --git a/doc/forum/Unknown_remote_type_S3.mdwn b/doc/forum/Unknown_remote_type_S3.mdwn new file mode 100644 index 0000000..ae84322 --- /dev/null +++ b/doc/forum/Unknown_remote_type_S3.mdwn @@ -0,0 +1,5 @@ +I have an annex set up with an s3 special remote. It works fine on one computer. I cloned the annex to a USB stick, plugged the stick into a second computer and cloned the annex to that machine. When I try to `git annex initremote cloud` on the second machine, I get the following error: + + git-annex: Unknown remote type S3 + +The second machine is running git-annex 3.20121017. diff --git a/doc/forum/Unlock_files_when_assistant_is_running__63__.mdwn b/doc/forum/Unlock_files_when_assistant_is_running__63__.mdwn new file mode 100644 index 0000000..0c51436 --- /dev/null +++ b/doc/forum/Unlock_files_when_assistant_is_running__63__.mdwn @@ -0,0 +1,13 @@ +I just started using the assistant on an existing annex. I fire up the assistant like so: + + $ git annex webapp + +Everything syncs and looks to be working fine. In another terminal, I then create a new file: + + $ touch testfile.txt + +The assistant sees that file, immediately adds it and syncs. Ok. So now I want to edit that file. + + $ git annex unlock testfile.txt + +As soon as I unlock the file, the assistant re-adds it to the annex and syncs, preventing me from editing the file. How can I edit files with the assistant running? diff --git a/doc/forum/Using___34__sync__34___to_sink_all_branches__63__.mdwn b/doc/forum/Using___34__sync__34___to_sink_all_branches__63__.mdwn new file mode 100644 index 0000000..99ddcc3 --- /dev/null +++ b/doc/forum/Using___34__sync__34___to_sink_all_branches__63__.mdwn @@ -0,0 +1,9 @@ +Is there a way to sync all (or a subset of) local branches, not just the currently checked out branch? + +Does this make sense at all, or does this show I am missing some important point in git-annex? + +I am asking because I would like to use git-annex to keep git repositories with normal git files (with versioned and branched content) in sync. + +If it's not currently possible, could you provide some pointers on where to start, if I wanted to change to Haskell source? + +Thnx diff --git a/doc/forum/Why_does_the_bup_remote_use___126____47__.bup__63__.mdwn b/doc/forum/Why_does_the_bup_remote_use___126____47__.bup__63__.mdwn new file mode 100644 index 0000000..c250ef7 --- /dev/null +++ b/doc/forum/Why_does_the_bup_remote_use___126____47__.bup__63__.mdwn @@ -0,0 +1,5 @@ +I created a test remote using the command: + + git annex initremote mybup type=bup encryption=none buprepo=/tmp/mybup + +I can copy files to and from the remote just fine. However, every time I do so it makes changes inside `~/.bup`. If I delete `~/.bup` it will recreate it. Is this expected? Are the files in `~/.bup` of any consequence? diff --git a/doc/forum/recover_deleted_files___63__.mdwn b/doc/forum/recover_deleted_files___63__.mdwn new file mode 100644 index 0000000..7bec369 --- /dev/null +++ b/doc/forum/recover_deleted_files___63__.mdwn @@ -0,0 +1,66 @@ +hi, + +i think of use git-annex as the backbone of a archival systems. at first point no distributed storage, just 1 node. +but now i run into the topic below ( deleted the "named" symlink of the "object" -- how to recover ?) + +maybe someone can enlighten me... + +thanks, +.ka + +// about the version. 
( debian-squeeze, bpo ) + +$ git-annex version +git-annex version: 3.20120629~bpo60+2 +local repository version: 3 +default repository version: 3 +supported repository versions: 3 +upgrade supported from repository versions: 0 1 2 + +// building up a testcase. + +$ git init +Initialized empty Git repository in ...test2/.git/ + +$ git annex init +init ok +(Recording state in git...) + +$ echo "aaa" > 1.txt + +$ echo "bbb" > 2.txt + +$ git-annex add . +add 1.txt (checksum...) ok +add 2.txt (checksum...) ok +(Recording state in git...) + +$ git commit -a -m "added 2 files" +fatal: No HEAD commit to compare with (yet) +fatal: No HEAD commit to compare with (yet) +[master (root-commit) fc2a5d7] added 2 files + Committer: userhere user +Your name and email address were configured automatically based +on your username and hostname. Please check that they are accurate. +... + 2 files changed, 2 insertions(+), 0 deletions(-) + create mode 120000 1.txt + create mode 120000 2.txt + +// ok, so far standard. i have now 2 files - lets delete one. + +$ rm 2.txt +$ ls -l +lrwxrwxrwx 1 xp xp 176 24. Okt 22:55 1.txt -> .git/annex/objects/Z6/7q/SHA256-s4--17e682f060b5f8e47ea04c5c4855908b0a5ad612022260fe50e11ecb0cc0ab76/SHA256-s4--17e682f060b5f8e47ea04c5c4855908b0a5ad612022260fe50e11ecb0cc0ab76 + +// eek, delete of 2.txt was a bad idea (it was just the symlink) -- try to recover... + +$ git-annex fix +$ git-annex fsck +fsck 1.txt (checksum...) ok +$ ls +1.txt + +// still not here.. how to recover the link to 2.txt ??? +// i still see the content of the file in the object folder +// if I want to use git-annex as the backend of a archival system, this is important. diff --git a/doc/forum/safely_dropping_git-annex_history.mdwn b/doc/forum/safely_dropping_git-annex_history.mdwn new file mode 100644 index 0000000..a6d8e66 --- /dev/null +++ b/doc/forum/safely_dropping_git-annex_history.mdwn @@ -0,0 +1,20 @@ +the git-annex branch of a repository i've had running since 2010 has grown to unmanagable dimensions (5gb in a fresh clone of the git-annex branch, while the master branch has merely 40mb, part of which is due to checked-in files), resulting in git-annex-merges to take in the order of magnitude of 15 minutes. getting an initial clone of the git-annex branch (not the data) takes hours alone in the "remote: Counting objects" phase (admittedly, the origin server is limited in ram, so it spends its time swapping the git process back and forth). + +is there a recommended way for how to reset the git-annex branch in a coordinated way? of course, this would have to happen on all copies of the repo at the same time. + +the workflow i currently imagine is + +* rename all copies of the repository (the_repo → the_repo-old, the_repo.git → the_repo-old.git) +* clone the old origin repository to a new origin with --single-branch. (this would be *the* oportunity to ``git filter-branch --prune-empty --index-filter 'git rm --cached --ignore-unmatch .git-annex -r' master`` as well, to get rid of commits of pre-whatever versions) +* ``git annex init`` on the master repository +* clone it to all the other copies and ``git annex init`` there +* set all the configuration options (untrusted repos etc) again +* either + * ``git annex reinject`` the files that are already present on the respective machines, or + * move the .git/annex/objects files over from the original locations, and use ``git annex fsck`` to make git-annex discover which files it already has, if that works. 
(i have numcopies=2, thus i'd dare to move instead of copy even when trying this out the first time. complete copies, even of partially checked out clones, will exceed the capacities of most clients) + +my questions in that endeavor are: + +* is there already a standard workflow for this? +* if not, will the above do the trick? +* can anything be done to avoid such problems in future? diff --git a/doc/forum/shared_cipher_tries_to_use_gpg.mdwn b/doc/forum/shared_cipher_tries_to_use_gpg.mdwn new file mode 100644 index 0000000..ccaa0c5 --- /dev/null +++ b/doc/forum/shared_cipher_tries_to_use_gpg.mdwn @@ -0,0 +1,10 @@ +I tried + + git annex initremote encsharedtest type=directory encryption=shared directory=/home/lee/gitannexplay + +and got errors: + + initremote encsharedtest gpg: error reading key: public key not found + +Looks like it thinks "shared" should be the name of a key rather than an instruction to use the shared cipher. +Am I doing something wrong? diff --git a/doc/forum/special_remote_for_IMAP.mdwn b/doc/forum/special_remote_for_IMAP.mdwn new file mode 100644 index 0000000..2aa9565 --- /dev/null +++ b/doc/forum/special_remote_for_IMAP.mdwn @@ -0,0 +1,44 @@ +I have implemented a special remote that stores files as email messages on an imap server. You need to install three utilities that the hooks invoke to deal with the email: mutt, imaputils, and munpack. I use mutt to send the email with the file as a mime attachment; imaputils talks to the imap server to check for and retrieve the message containing the desired file; and munpack extracts and decodes the attachment to get our file back. + +Several programs could be used in place of mutt, but the latter has a convenient command-line option for attaching files; mutt is of course available in the repositories of most linux distributions. + +imaputils is a perl program available at http://sourceforge.net/projects/imaputils/ +It has several perl library dependencies that you might need to download using the cpan tool. +What imaputils does for you is provide a command line tool for interacting with the mail server. You can search for mail with a particular subject (for example), delete mail, retrieve messages, and in general do anything that you can do with a mail client such as mutt, but from the command line rather than a curses interface. This allows you to in turn write scripts that talk to imap servers. + +munpack is part of the mpack package. This is included in the Ubuntu and Debian repositories, and can probably be easily obtained for most linuxes. munpack extracts and decodes mime attachments from the command line. + +I define the special remote with + + git annex initremote hogneygmail type=hook encryption=gitannex hooktype=hogneygmail + +The pgp key "gitannex" is a key established just for this purpose, that has no passphrase. This allows me to use encryption transparently. You could also use encryption=shared if your version of git-annex is recent enough. 
I also did + + git annex untrust hogneygmail + +Here are the hooks: + + hogneygmail-store-hook = mutt -n -s $ANNEX_KEY -a $ANNEX_FILE -- {email address} < /dev/null + hogneygmail-checkpresent-hook = "(imaputils.pl --conf {imap config file} --subject $ANNEX_KEY --count | grep -q \"1 messages\" -) && echo $ANNEX_KEY" + hogneygmail-retrieve-hook = "imaputils.pl --conf {imap config file} --subject $ANNEX_KEY --display | munpack -fq && mv $ANNEX_KEY $ANNEX_FILE; rm $ANNEX_KEY.desc" + hogneygmail-remove-hook = "imaputils.pl --conf {imap config file} --subject $ANNEX_KEY --delete" + +The bits inside of the curly brackets, for example {email address}, are what you need to specialize for your particular case (removing the brackets as well). The {imap config file} is a file that contains some configuration for imaputils. In my case it contains + + ssl + pass {password} + host {mail host} + user {mail username} + box {name of mailbox to check} + +The first line tells imaputils to use ssl, and the third line is the address of the mail host. I include my username and password so I won't need to type these in repeatedly. Of course this means that you need to protect this file carefully. + + + +The operation of the hooks is pretty straightforward. The store-hook attaches the file to an otherwise empty email message with a subject equal to the name of the key, and mails it off. Note that if you use encryption then the keys generated by git-annex here will not be the same ones you see on your local disc. The checkpresent-hook asks the imap server how many emails have the subject equal to the key we are looking for; only if the reply contains "1 messages" are we sure the right one is there. The retrieve-hook uses the "--display" option to imaputils to stream the message, pipes it to munpack to silently extract the attachment (which is our (encrypted) file), and moves the result into the file contents. It then cleans up by deleting the .desc file that I can't prevent munpack from leaving on the disk, even when the message is blank. The remove-hook passes the "-- delete" option to imaputils to supposedly delete the target message from the mail server. + +I've performed very limited testing of this, and my knowledge of git and, especially, git-annex is very primitive, so I'm sure this could be vastly improved. In my testing with gmail this seems to work fine, except that messages do not get deleted from the server - I don't know why. + +I've developed this as an experiment and proof of concept, and have no knowledge of whether actually using this is in accord with the terms of service of gmail or any other mail service you might be using, nor whether it is safe or a good idea. + +-- Lee diff --git a/doc/git-annex-shell.mdwn b/doc/git-annex-shell.mdwn index e6ebe42..5fbc6de 100644 --- a/doc/git-annex-shell.mdwn +++ b/doc/git-annex-shell.mdwn @@ -95,6 +95,11 @@ changed. If set, disallows running git-shell to handle unknown commands. +* GIT_ANNEX_SHELL_DIRECTORY + + If set, git-annex-shell will refuse to run commands that do not operate + on the specified directory. + # SEE ALSO [[git-annex]](1) diff --git a/doc/git-annex.mdwn b/doc/git-annex.mdwn index c427cbf..842139c 100644 --- a/doc/git-annex.mdwn +++ b/doc/git-annex.mdwn @@ -486,6 +486,10 @@ subdirectories). With --force, even files whose content is not currently available will be rekeyed. Use with caution. +* xmppgit + + This command is used internally to perform git pulls over XMPP. + # OPTIONS * --force @@ -540,7 +544,9 @@ subdirectories). 
* --numcopies=n Overrides the `annex.numcopies` setting, forcing git-annex to ensure the - specified number of copies exist. + specified number of copies exist. + + Note that setting numcopies to 0 is very unsafe. * --time-limit=time @@ -691,6 +697,8 @@ Here are all the supported configuration settings. * `annex.numcopies` Number of copies of files to keep across all repositories. (default: 1) + + Note that setting numcopies to 0 is very unsafe. * `annex.backends` @@ -877,6 +885,11 @@ Here are all the supported configuration settings. Used to identify Amazon S3 special remotes. Normally this is automaticaly set up by `git annex initremote`. +* `remote..annex-xmppaddress` + + Used to identify the XMPP address of a Jabber buddy. + Normally this is set up by the git-annex assistant when pairing over XMPP. + # CONFIGURATION VIA .gitattributes The key-value backend used when adding a new file to the annex can be @@ -893,6 +906,8 @@ the `annex.numcopies` attribute in `.gitattributes` files. For example, this makes two copies be needed for wav files: *.wav annex.numcopies=2 + +Note that setting numcopies to 0 is very unsafe. # FILES diff --git a/doc/install.mdwn b/doc/install.mdwn index 7e88fc0..40a526c 100644 --- a/doc/install.mdwn +++ b/doc/install.mdwn @@ -11,7 +11,6 @@ detailed instructions | quick install [[ArchLinux]] | `yaourt -Sy git-annex` [[NixOS]] | `nix-env -i git-annex` [[Gentoo]] | `emerge git-annex` -[[NixOS]] | `nix install git-annex` [[ScientificLinux5]] | (and other RHEL5 clones like CentOS5) [[openSUSE]] | Windows | [[sorry, Windows not supported yet|todo/windows_support]] diff --git a/doc/install/OSX.mdwn b/doc/install/OSX.mdwn index 00ce5bd..8a0c738 100644 --- a/doc/install/OSX.mdwn +++ b/doc/install/OSX.mdwn @@ -11,7 +11,7 @@ builds a standalone git-annex.app of the git-annex assistant.
 sudo brew update
-sudo brew install haskell-platform git ossp-uuid md5sha1sum coreutils pcre
+sudo brew install haskell-platform git ossp-uuid md5sha1sum coreutils pcre libgsasl gnutls libidn pkg-config
 cabal update
 cabal install git-annex --bindir=$HOME/bin
 
@@ -35,7 +35,7 @@ cabal install git-annex --bindir=$HOME/bin Do not forget to add to your PATH variable your ~/bin folder. In your .bashrc, for example:
-PATH=~/bin:/usr/bin/local:$PATH
+PATH=$HOME/bin:/usr/local/bin:$PATH
 
See also: diff --git a/doc/install/OSX/comment_6_12bd83e7e2327c992448e87bdb85d17e._comment b/doc/install/OSX/comment_6_12bd83e7e2327c992448e87bdb85d17e._comment new file mode 100644 index 0000000..62851c1 --- /dev/null +++ b/doc/install/OSX/comment_6_12bd83e7e2327c992448e87bdb85d17e._comment @@ -0,0 +1,15 @@ +[[!comment format=mdwn + username="https://me.yahoo.com/a/6xTna_B_h.ECb6_ftC2dYLytAEwrv36etg_054U-#4c1e7" + nickname="Fake" + subject="libncurses on 10.7" + date="2012-10-17T21:24:24Z" + content=""" +I'm getting an error from gpg when I try to set up a repository on a remote server with encrypted rsync. Looks like libncurses in /usr/lib is 32 bit: + + Dyld Error Message: + Library not loaded: /opt/local/lib/libncurses.5.dylib + Referenced from: /Applications/git-annex.app/Contents/MacOS/opt/local/lib/libreadline.6.2.dylib + Reason: no suitable image found. Did find: + /usr/lib/libncurses.5.dylib: mach-o, but wrong architecture + /usr/lib/libncurses.5.dylib: mach-o, but wrong architecture +"""]] diff --git a/doc/install/OSX/comment_7_93e0bb53ac2d7daef53426fbdc5f92d9._comment b/doc/install/OSX/comment_7_93e0bb53ac2d7daef53426fbdc5f92d9._comment new file mode 100644 index 0000000..fccd9fb --- /dev/null +++ b/doc/install/OSX/comment_7_93e0bb53ac2d7daef53426fbdc5f92d9._comment @@ -0,0 +1,15 @@ +[[!comment format=mdwn + username="https://www.google.com/accounts/o8/id?id=AItOawkurjhi0CRJvgm7QNaZDWS9hitBtavqIpc" + nickname="Bret" + subject="git-annex.app Not working on 32 bit machines" + date="2012-11-03T19:18:47Z" + content=""" +I tried running the git-annex.app on my Core Duo Macbook pro, and it does not run at all. I get an error on my system.log + +`Nov 3 12:13:26 Bret-Mac [0x0-0x15015].com.branchable.git-annex[155]: /Applications/git-annex.app/Contents/MacOS/runshell: line 52: /Applications/git-annex.app/Contents/MacOS/bin/git-annex: Bad CPU type in executable +Nov 3 12:13:26 Bret-Mac com.apple.launchd.peruser.501[92] ([0x0-0x15015].com.branchable.git-annex[155]): Exited with exit code: 1` + +It works on my 64 bit machine, and this has become quite the problem for a while now, where people with newer macs dont compile back for a 32bit machine. + +Is there any hope for a pre-compiled binary that works on a 32 bit machine? +"""]] diff --git a/doc/install/OSX/comment_8_141eac2f3fb25fe18b4268786f00ad6a._comment b/doc/install/OSX/comment_8_141eac2f3fb25fe18b4268786f00ad6a._comment new file mode 100644 index 0000000..f7b1f22 --- /dev/null +++ b/doc/install/OSX/comment_8_141eac2f3fb25fe18b4268786f00ad6a._comment @@ -0,0 +1,8 @@ +[[!comment format=mdwn + username="https://www.google.com/accounts/o8/id?id=AItOawkSq2FDpK2n66QRUxtqqdbyDuwgbQmUWus" + nickname="Jimmy" + subject="comment 8" + date="2012-11-07T16:08:00Z" + content=""" +I've been updating my haskell platform install recently, i used to try and get the builder to spit out 32/64bit binaries, but recently it's just become too messy, I've just migrated to a full 64bit build system. I'm afraid I won't be able to provide 32bit builds any more. 
+"""]] diff --git a/doc/install/comment_1_da5919c986d2ae187bc2f73de9633978._comment b/doc/install/comment_1_da5919c986d2ae187bc2f73de9633978._comment new file mode 100644 index 0000000..d4db232 --- /dev/null +++ b/doc/install/comment_1_da5919c986d2ae187bc2f73de9633978._comment @@ -0,0 +1,8 @@ +[[!comment format=mdwn + username="https://www.google.com/accounts/o8/id?id=AItOawlwYMdU0H7P7MMlD0v_BcczO-ZkYHY4zuY" + nickname="Morris" + subject="Arch Linux" + date="2012-10-17T13:21:24Z" + content=""" +For Arch Linux there should be the AUR package [git-annex-bin](https://aur.archlinux.org/packages.php?ID=63503) mentioned, because it's easier to install (no haskell dependencies to be installed) and is based on the prebuild linux binary tarball. +"""]] diff --git a/doc/install/fromscratch.mdwn b/doc/install/fromscratch.mdwn index 795443b..18759d6 100644 --- a/doc/install/fromscratch.mdwn +++ b/doc/install/fromscratch.mdwn @@ -18,6 +18,8 @@ quite a lot. * [bloomfilter](http://hackage.haskell.org/package/bloomfilter) * [edit-distance](http://hackage.haskell.org/package/edit-distance) * [hS3](http://hackage.haskell.org/package/hS3) (optional) + * [SafeSemaphore](http://hackage.haskell.org/package/SafeSemaphore) + * [async](http://hackage.haskell.org/package/async) * Optional haskell stuff, used by the [[assistant]] and its webapp (edit Makefile to disable) * [stm](http://hackage.haskell.org/package/stm) (version 2.3 or newer) @@ -35,12 +37,14 @@ quite a lot. * [wai-logger](http://hackage.haskell.org/package/wai-logger) * [warp](http://hackage.haskell.org/package/warp) * [blaze-builder](http://hackage.haskell.org/package/blaze-builder) - * [blaze-html](http://hackage.haskell.org/package/blaze-html) * [crypto-api](http://hackage.haskell.org/package/crypto-api) * [hamlet](http://hackage.haskell.org/package/hamlet) * [clientsession](http://hackage.haskell.org/package/clientsession) * [network-multicast](http://hackage.haskell.org/package/network-multicast) * [network-info](http://hackage.haskell.org/package/network-info) + * [network-protocol-xmpp](http://hackage.haskell.org/package/network-protocol-xmpp) + * [dns](http://hackage.haskell.org/package/dns) + * [xml-types](http://hackage.haskell.org/package/xml-types) * Shell commands * [git](http://git-scm.com/) * [uuid](http://www.ossp.org/pkg/lib/uuid/) diff --git a/doc/news/version_3.20121001.mdwn b/doc/news/version_3.20121001.mdwn deleted file mode 100644 index 4505e9b..0000000 --- a/doc/news/version_3.20121001.mdwn +++ /dev/null @@ -1,27 +0,0 @@ -git-annex 3.20121001 released with [[!toggle text="these changes"]] -[[!toggleable text=""" - * fsck: Now has an incremental mode. Start a new incremental fsck pass - with git annex fsck --incremental. Now the fsck can be interrupted - as desired, and resumed with git annex fsck --more. - Thanks, Justin Azoff - * New --time-limit option, makes long git-annex commands stop after - a specified amount of time. - * fsck: New --incremental-schedule option which is nice for scheduling - eg, monthly incremental fsck runs in cron jobs. - * Fix fallback to ~/Desktop when xdg-user-dir is not available. - Closes: #[688833](http://bugs.debian.org/688833) - * S3: When using a shared cipher, S3 credentials are not stored encrypted - in the git repository, as that would allow anyone with access to - the repository access to the S3 account. Instead, they're stored - in a 600 mode file in the local git repo. - * webapp: Avoid crashing when ssh-keygen -F chokes on an invalid known\_hosts - file. 
- * Always do a system wide installation when DESTDIR is set. Closes: #[689052](http://bugs.debian.org/689052) - * The Makefile now builds with the new yesod by default. - Systems like Debian that have the old yesod 1.0.1 should set - GIT\_ANNEX\_LOCAL\_FEATURES=-DWITH\_OLD\_YESOD - * copy: Avoid updating the location log when no copy is performed. - * configure: Test that uuid -m works, falling back to plain uuid if not. - * Avoid building the webapp on Debian architectures that do not yet - have template haskell and thus yesod. (Should be available for arm soonish - I hope)."""]] \ No newline at end of file diff --git a/doc/news/version_3.20121112.mdwn b/doc/news/version_3.20121112.mdwn new file mode 100644 index 0000000..8ebd819 --- /dev/null +++ b/doc/news/version_3.20121112.mdwn @@ -0,0 +1,48 @@ +git-annex 3.20121112 released with [[!toggle text="these changes"]] +[[!toggleable text=""" + * assistant: Can use XMPP to notify other nodes about pushes made to other + repositories, as well as pushing to them directly over XMPP. + * wepapp: Added an XMPP configuration interface. + * webapp: Supports pairing over XMPP, with both friends, and other repos + using the same account. + * assistant: Drops non-preferred content when possible. + * assistant: Notices, and applies config changes as they are made to + the git-annex branch, including config changes pushed in from remotes. + * git-annex-shell: GIT\_ANNEX\_SHELL\_DIRECTORY can be set to limit it + to operating on a specified directory. + * webapp: When setting up authorized\_keys, use GIT\_ANNEX\_SHELL\_DIRECTORY. + * Preferred content path matching bugfix. + * Preferred content expressions cannot use "in=". + * Preferred content expressions can use "present". + * Fix handling of GIT\_DIR when it refers to a git submodule. + * Depend on and use the Haskell SafeSemaphore library, which provides + exception-safe versions of SampleVar and QSemN. + Thanks, Ben Gamari for an excellent patch set. + * file:/// URLs can now be used with the web special remote. + * webapp: Allow dashes in ssh key comments when pairing. + * uninit: Check and abort if there are symlinks to annexed content that + are not checked into git. + * webapp: Switched to using the same multicast IP address that avahi uses. + * bup: Don't pass - to bup-split to make it read stdin; bup 0.25 + does not accept that. + * bugfix: Don't fail transferring content from read-only repos. + Closes: #[691341](http://bugs.debian.org/691341) + * configure: Check that checksum programs produce correct checksums. + * Re-enable dbus, using a new version of the library that fixes the memory + leak. + * NetWatcher: When dbus connection is lost, try to reconnect. + * Use USER and HOME environment when set, and only fall back to getpwent, + which doesn't work with LDAP or NIS. + * rsync special remote: Include annex-rsync-options when running rsync + to test a key's presence. + * The standalone tarball's runshell now takes care of installing a + ~/.ssh/git-annex-shell wrapper the first time it's run. + * webapp: Make an initial, empty commit so there is a master branch + * assistant: Fix syncing local drives. + * webapp: Fix creation of rsync.net repositories. + * webapp: Fix renaming of special remotes. + * webapp: Generate better git remote names. + * webapp: Ensure that rsync special remotes are enabled using the same + name they were originally created using. 
+ * Bugfix: Fix hang in webapp when setting up a ssh remote with an absolute + path."""]] \ No newline at end of file diff --git a/doc/preferred_content.mdwn b/doc/preferred_content.mdwn index d749865..ac2cd1e 100644 --- a/doc/preferred_content.mdwn +++ b/doc/preferred_content.mdwn @@ -20,17 +20,18 @@ The expressions are very similar to the file matching options documented on the [[git-annex]] man page. At the command line, you can use those options in commands like this: - git annex get --include='*.mp3' --and -'(' --not --in=archive -')' + git annex get --include='*.mp3' --and -'(' --not --largerthan=100mb -')' The equivilant preferred content expression looks like this: - include=*.mp3 and (not in=archive) + include=*.mp3 and (not largerthan=100mb) -So, just remove the dashes, basically. +So, just remove the dashes, basically. However, there are some differences +from the command line options to keep in mind: -## file matching +### difference: file matching -Note that while --include and --exclude match files relative to the current +While --include and --exclude match files relative to the current directory, preferred content expressions always match files relative to the top of the git repository. Perhaps you put files into `archive` directories when you're done with them. Then you could configure your laptop to prefer @@ -38,6 +39,48 @@ to not retain those files, like this: exclude=*/archive/* +### difference: no "in=" + +Preferred content expressions have no direct equivalent to `--in`. + +Often, it's best to add repositories to groups, and match against +the groups in a preferred content expression. So rather than +`--in=usbdrive`, put all the USB drives into a "transfer" group, +and use "copies=transfer:1" + +### difference: dropping + +To decide if content should be dropped, git-annex evaluates the preferred +content expression under the assumption that the content has *already* been +dropped. If the content would not be preferred then, the drop can be done. +So, for example, `copies=2` in a preferred content expression lets +content be dropped only when there are currently 3 copies of it, including +the repo it's being dropped from. This is different than running `git annex +drop --copies=2`, which will drop files that currently have 2 copies. + +A wrinkle of this approach is how `in=` is handled. When deciding if +content should be dropped, git-annex looks at the current status, not +the status if the content would be dropped. So `in=here` means that +any currently present content is preferred, which can be useful if you +want manual control over content. Meanwhile `not (in=here)` should be +avoided -- it will cause content that's not here to be preferred, +but once the content arrives, it'll stop being preferred and will be +dropped again! + +### difference: "present" + +There's a special "present" keyword you can use in a preferred content +expression. This means that content is preferred if it's present, +and not otherwise. This leaves it up to you to use git-annex manually +to move content around. You can use this to avoid preferred content +settings from affecting a subdirectory. For example: + + auto/* or (include=ad-hoc/* and present) + +Note that `not present` is a very bad thing to put in a preferred content +expression. It'll make it prefer to get content that's not present, and +drop content that is present! Don't go there..
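To make the warning concrete, here is a toy Haskell simulation -- not git-annex's actual matcher code -- of an assistant-style loop evaluating `not present` for a single file. Because the expression is re-evaluated against the file's *current* status, every transfer flips the result:

    -- Toy model only: Status stands in for whether the content is here.
    data Status = Present | Absent
        deriving (Eq, Show)

    -- "not present": the content is preferred exactly when it is not here.
    preferred :: Status -> Bool
    preferred s = s == Absent

    -- One pass of an assistant-style loop: get what is preferred but
    -- missing, drop what is present but not preferred.
    step :: Status -> Status
    step s
        | s == Absent && preferred s = Present        -- transfer it in
        | s == Present && not (preferred s) = Absent  -- drop it again
        | otherwise = s

    -- ghci> take 6 (iterate step Absent)
    -- [Absent,Present,Absent,Present,Absent,Present]

The same ping-pong applies to `not (in=here)`, since `in=` is likewise evaluated against the current status.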
+ ## standard expressions git-annex comes with some standard preferred content expressions, that can diff --git a/doc/special_remotes.mdwn b/doc/special_remotes.mdwn index 2f60204..65fcb87 100644 --- a/doc/special_remotes.mdwn +++ b/doc/special_remotes.mdwn @@ -22,6 +22,7 @@ for various cloud things: * [[tips/Internet_Archive_via_S3]] * [[tahoe-lafs|forum/tips:_special__95__remotes__47__hook_with_tahoe-lafs]] * [[tips/using_box.com_as_a_special_remote]] +* [[forum/special_remote_for_IMAP]] ## Unused content on special remotes diff --git a/doc/special_remotes/bup/comment_1_96179a003da4444f6fc08867872cda0a._comment b/doc/special_remotes/bup/comment_1_96179a003da4444f6fc08867872cda0a._comment new file mode 100644 index 0000000..02691c6 --- /dev/null +++ b/doc/special_remotes/bup/comment_1_96179a003da4444f6fc08867872cda0a._comment @@ -0,0 +1,43 @@ +[[!comment format=mdwn + username="https://www.google.com/accounts/o8/id?id=AItOawkgbXwQtPQSG8igdS7U8l031N8sqDmuyvk" + nickname="Albert" + subject="Error with bup and gnupg" + date="2012-10-22T20:56:56Z" + content=""" +Hello, + +I get this error when trying to use git-annex with bup and gnupg: + +
+move importable_pilot_surveys.tar (gpg) (checking localaseebup...) (to localaseebup...) 
+Traceback (most recent call last):
+  File \"/usr/lib/bup/cmd/bup-split\", line 133, in 
+    progress=prog)
+  File \"/usr/lib/bup/bup/hashsplit.py\", line 167, in split_to_shalist
+    for (sha,size,bits) in sl:
+  File \"/usr/lib/bup/bup/hashsplit.py\", line 118, in _split_to_blobs
+    for (blob, bits) in hashsplit_iter(files, keep_boundaries, progress):
+  File \"/usr/lib/bup/bup/hashsplit.py\", line 86, in _hashsplit_iter
+    bnew = next(fi)
+  File \"/usr/lib/bup/bup/helpers.py\", line 86, in next
+    return it.next()
+  File \"/usr/lib/bup/bup/hashsplit.py\", line 49, in blobiter
+    for filenum,f in enumerate(files):
+  File \"/usr/lib/bup/cmd/bup-split\", line 128, in 
+    files = extra and (open(fn) for fn in extra) or [sys.stdin]
+IOError: [Errno 2] No such file or directory: '-'
+
+ + +I was able to work-around this issue by altering /usr/lib/bup/cmd/bup-split (though I don't think its a bup bug) to just pull from stdin: + +files = [sys.stdin] + +on ~ line 128. + +Any ideas? Also, do you think that bup's data-deduplication does anything when gnupg is enabled, i.e. is it just as well to use a directory remote with gnupg? + +Thanks! Git annex rules! + +Albert +"""]] diff --git a/doc/special_remotes/bup/comment_2_612b038c15206f9f3c2e23c7104ca627._comment b/doc/special_remotes/bup/comment_2_612b038c15206f9f3c2e23c7104ca627._comment new file mode 100644 index 0000000..97af184 --- /dev/null +++ b/doc/special_remotes/bup/comment_2_612b038c15206f9f3c2e23c7104ca627._comment @@ -0,0 +1,12 @@ +[[!comment format=mdwn + username="http://joeyh.name/" + ip="4.154.0.23" + subject="comment 2" + date="2012-10-23T20:01:43Z" + content=""" +@Albert, thanks for reporting this bug (but put them in [[bugs]] in future please). + +This is specific to using the bup special remote with encryption. Without encryption it works. And no, it won't manage to deduplicate anything that's encrypted, as far as I know. + +I think bup-split must have used - for stdin in the past, but now, it just reads from stdin when no file is specified, so I've updated git-annex. +"""]] diff --git a/doc/sync.mdwn b/doc/sync.mdwn index 057dcb3..540e645 100644 --- a/doc/sync.mdwn +++ b/doc/sync.mdwn @@ -22,7 +22,7 @@ fetches from each remote, and merges in any changes that have been made to the remotes too. Finally, it updates `synced/master` to reflect the new state of `master`, and pushes it out to each of the remotes. -This way, changes propigate around between repositories as `git annex sync` +This way, changes propagate around between repositories as `git annex sync` is run on each of them. Every repository does not need to be able to talk to every other repository; as long as the graph of repositories is connected, and `git annex sync` is run from time to time on each, a given diff --git a/doc/tips/assume-unstaged.mdwn b/doc/tips/assume-unstaged.mdwn index ef74d9b..536772c 100644 --- a/doc/tips/assume-unstaged.mdwn +++ b/doc/tips/assume-unstaged.mdwn @@ -5,7 +5,7 @@ up `git status` and stuff by not statting the whole tree looking for changed files. This feature works quite well with git-annex. Especially because git -annex's files are immutable, so arn't going to change out from under it, +annex's files are immutable, so aren't going to change out from under it, this is a nice fit. If you have a very large tree and `git status` is annoyingly slow, you can turn it on: diff --git a/doc/tips/emacs_integration.mdwn b/doc/tips/emacs_integration.mdwn index a607642..12f1688 100644 --- a/doc/tips/emacs_integration.mdwn +++ b/doc/tips/emacs_integration.mdwn @@ -5,3 +5,16 @@ dired style. Locally available files are colored differently, and pressing g runs `git annex get` on the file at point. + +---- + +John Wiegley has developed a brand new git-annex interaction mode for +Emacs, which aims to integrate with the standard facilities +(C-x C-q, M-x dired, etc) rather than invent its own interface. + + + +He has also added support to org-attach; if +`org-attach-git-annex-cutoff' is non-nil and smaller than the size + of the file you're attaching then org-attach will `git annex add the +file`; otherwise it will "git add" it. 
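Relating to the bup-split comment above (and the corresponding 3.20121112 changelog entry: bup 0.25 no longer accepts `-` as a stand-in for stdin), the sketch below shows one way a caller can stream content to `bup split` over a pipe instead of passing `-`. It only illustrates the approach; it is not git-annex's actual bup special remote code, and the helper name is made up:

    import System.IO (hClose)
    import System.Process
    import qualified Data.ByteString.Lazy as L

    -- Stream a file to "bup split" on stdin rather than passing "-" as a
    -- file argument. branchname is whatever name the remote splits into.
    bupSplit :: String -> FilePath -> IO ()
    bupSplit branchname srcfile = do
        content <- L.readFile srcfile
        (Just hin, _, _, pid) <- createProcess
            (proc "bup" ["split", "-n", branchname]) { std_in = CreatePipe }
        L.hPut hin content
        hClose hin
        _ <- waitForProcess pid
        return ()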
diff --git a/doc/todo/add_-all_option.mdwn b/doc/todo/add_-all_option.mdwn index e6fa0b3..351d257 100644 --- a/doc/todo/add_-all_option.mdwn +++ b/doc/todo/add_-all_option.mdwn @@ -5,7 +5,8 @@ every keys with content not present). This would be useful when a repository has a history with deleted files whose content you want to keep (so you're not using `dropunused`). Or when you have a lot of branches and just want to be able to fsck -every file referenced in any branch. It could also be useful (or even a +every file referenced in any branch (or indeed, any file referenced in any +ref). It could also be useful (or even a good default) in a bare repository. A problem with the idea is that `.gitattributes` values for keys not diff --git a/doc/todo/incremental_fsck.mdwn b/doc/todo/incremental_fsck.mdwn index 5cdcd66..7c56328 100644 --- a/doc/todo/incremental_fsck.mdwn +++ b/doc/todo/incremental_fsck.mdwn @@ -17,3 +17,8 @@ clear the sticky bit. --[[Joey]] > * --max-age=30d Once the incremental fsck completes and was started 30 days ago, > start a new one. > * --time-limit --size-limit --file-limit: Limit how long the fsck runs. + +>> Calling this [[done]]. The `--incremental-schedule` option +>> allows scheduling time between incremental fscks. `--time-limit` is +>> done. I implemented `--smallerthan` independently. Not clear what +>> `--file-limit` would be. --[[Joey]] diff --git a/doc/todo/wishlist:_An_--all_option_for_dropunused.mdwn b/doc/todo/wishlist:_An_--all_option_for_dropunused.mdwn new file mode 100644 index 0000000..7f7ac13 --- /dev/null +++ b/doc/todo/wishlist:_An_--all_option_for_dropunused.mdwn @@ -0,0 +1 @@ +Cleaning out a repository is presently a fairly manual process. Am I missing a UI trick? "dropunsed" with no arguments prints nothing at all; I think in that case it should display the list of what could be dropped. diff --git a/doc/todo/wishlist:_disable_automatic_commits.mdwn b/doc/todo/wishlist:_disable_automatic_commits.mdwn new file mode 100644 index 0000000..873da9b --- /dev/null +++ b/doc/todo/wishlist:_disable_automatic_commits.mdwn @@ -0,0 +1,13 @@ +When using the [[/assistant]] on some of my repositories, I would like to retain manual control over the granularity and contents of the commit history. Some motivating reasons: + +* manually specified commit messages makes the history easier to follow +* make a series of minor changes to a file over a period of a few hours would result in a single commit rather than capturing intermediate incomplete edits +* manual choice of which files to annex (based on predicted usage) could be useful, e.g. a repo might contain a 4MB PDF which you want available in *every* remote even without `git annex get`, and also some 2MB images which are only required in some remotes + +Obviously this needs to be configurable at least per repository, and ideally perhaps even per remote, since usage habits can vary from machine to machine (e.g. I could choose to commit manually from my desktop machine which has a nice comfy keyboard and large screen, but this would be too much pain to do from my tiny netbook). + +In fact, this is vaguely related to [[design/assistant/partial_content]], since the usefulness of the commit history depends on the context of the data being manipulated, which in turn depends on which subdirectories are being touched. So any mechanism for disabling sync per directory could potentially be reused for disabling auto-commit per directory. 
+ +According to Joey, it should be easy to arrange for the watcher thread not to run, but would need some more work for the assistant to notice manual commits in order to sync them; however the assistant already does some crazy inotify watching of git refs, in order to detect incoming pushes, so detecting manual commits wouldn't be a stretch. + +[[!tag design/assistant]] diff --git a/doc/todo/wishlist:_make_partial_files_available_during_transfer.mdwn b/doc/todo/wishlist:_make_partial_files_available_during_transfer.mdwn new file mode 100644 index 0000000..b021c90 --- /dev/null +++ b/doc/todo/wishlist:_make_partial_files_available_during_transfer.mdwn @@ -0,0 +1,18 @@ +Imagine this situation: +You have a laptop and a NAS. +On your laptop you want to consume a large media file located on the NAS. +So you type: + + git annex get --from nas mediafile + +But now you have to wait for the download to complete, unless either + +* rsync is pointed directly to the file in the object storage ("--inplace") +or +* the symlink temporarily points to the partial file during a transfer + +which would allow you instantaneous consumption of your media. +It might make sense to make this behavior configurable, because not everyone might agree with having partial content (that mismatches its key) around. + + +So what do you say? diff --git a/ghci b/ghci new file mode 100755 index 0000000..d25519e --- /dev/null +++ b/ghci @@ -0,0 +1,5 @@ +#!/bin/sh +# This runs ghci with the same flags used when compiling with ghc. +# Certian flags need to be the same in order for ghci to reuse compiled +# objects. +ghci $(make getflags | sed 's/-Wall//') $@ diff --git a/git-annex-shell.1 b/git-annex-shell.1 index a5be42a..36197e5 100644 --- a/git-annex-shell.1 +++ b/git-annex-shell.1 @@ -79,6 +79,10 @@ If set, disallows any command that could modify the repository. .IP "GIT_ANNEX_SHELL_LIMITED" If set, disallows running git\-shell to handle unknown commands. .IP +.IP "GIT_ANNEX_SHELL_DIRECTORY" +If set, git\-annex\-shell will refuse to run commands that do not operate +on the specified directory. +.IP .SH SEE ALSO git\-annex(1) .PP diff --git a/git-annex.1 b/git-annex.1 index 15b6a34..448c706 100644 --- a/git-annex.1 +++ b/git-annex.1 @@ -436,6 +436,9 @@ both the file, and the new key to use for it. With \-\-force, even files whose content is not currently available will be rekeyed. Use with caution. .IP +.IP "xmppgit" +This command is used internally to perform git pulls over XMPP. +.IP .SH OPTIONS .IP "\-\-force" .IP @@ -480,7 +483,9 @@ It should be specified using the name of a configured remote. .IP .IP "\-\-numcopies=n" Overrides the annex.numcopies setting, forcing git\-annex to ensure the -specified number of copies exist. +specified number of copies exist. +.IP +Note that setting numcopies to 0 is very unsafe. .IP .IP "\-\-time\-limit=time" Limits how long a git\-annex command runs. The time can be something @@ -609,6 +614,8 @@ A unique UUID for this repository (automatically set). .IP "annex.numcopies" Number of copies of files to keep across all repositories. (default: 1) .IP +Note that setting numcopies to 0 is very unsafe. +.IP .IP "annex.backends" Space\-separated list of names of the key\-value backends to use. The first listed is used to store new files by default. @@ -765,6 +772,10 @@ but you can change it if needed. Used to identify Amazon S3 special remotes. Normally this is automaticaly set up by git annex initremote. .IP +.IP "remote..annex\-xmppaddress" +Used to identify the XMPP address of a Jabber buddy. 
+Normally this is set up by the git\-annex assistant when pairing over XMPP. +.IP .SH CONFIGURATION VIA .gitattributes The key\-value backend used when adding a new file to the annex can be configured on a per\-file\-type basis via .gitattributes files. In the file, @@ -781,6 +792,8 @@ For example, this makes two copies be needed for wav files: .PP *.wav annex.numcopies=2 .PP +Note that setting numcopies to 0 is very unsafe. +.PP .SH FILES These files are used by git\-annex: .PP diff --git a/git-annex.cabal b/git-annex.cabal index 20d4211..511d2f2 100644 --- a/git-annex.cabal +++ b/git-annex.cabal @@ -1,5 +1,5 @@ Name: git-annex -Version: 3.20121017 +Version: 3.20121112 Cabal-Version: >= 1.8 License: GPL Maintainer: Joey Hess @@ -43,29 +43,33 @@ Flag Webapp Flag Pairing Description: Enable pairing +Flag XMPP + Description: Enable notifications using XMPP + +Flag DNS + Description: Enable the haskell DNS library for DNS lookup + Executable git-annex Main-Is: git-annex.hs Build-Depends: MissingH, hslogger, directory, filepath, - unix, containers, utf8-string, network (>= 2.4.0.1), mtl, + unix, containers, utf8-string, network (>= 2.0), mtl, bytestring, old-locale, time, pcre-light, extensible-exceptions, dataenc, SHA, process, json, HTTP, - base == 4.5.*, monad-control, transformers-base, lifted-base, - IfElse, text, QuickCheck >= 2.1, bloomfilter, edit-distance, process + base (>= 4.5 && < 4.7), monad-control, transformers-base, lifted-base, + IfElse, text, QuickCheck >= 2.1, bloomfilter, edit-distance, process, + SafeSemaphore, async -- Need to list these because they're generated from .hsc files. Other-Modules: Utility.Touch Utility.Mounts Include-Dirs: Utility C-Sources: Utility/libdiskfree.c Utility/libmounts.c - if (! os(linux)) - C-Sources: Utility/libkqueue.c Extensions: CPP GHC-Options: -threaded - CPP-Options: -DURI_24 if flag(S3) Build-Depends: hS3 CPP-Options: -DWITH_S3 - if flag(Assistant) + if flag(Assistant) && ! os(windows) && ! os(solaris) Build-Depends: stm >= 2.3 CPP-Options: -DWITH_ASSISTANT @@ -73,17 +77,18 @@ Executable git-annex Build-Depends: hinotify CPP-Options: -DWITH_INOTIFY else - if (! os(windows)) + if (! os(windows) && ! 
os(solaris)) CPP-Options: -DWITH_KQUEUE + C-Sources: Utility/libkqueue.c if os(linux) && flag(Dbus) - Build-Depends: dbus + Build-Depends: dbus (>= 0.10.3) CPP-Options: -DWITH_DBUS if flag(Webapp) && flag(Assistant) Build-Depends: yesod, yesod-static, case-insensitive, http-types, transformers, wai, wai-logger, warp, blaze-builder, - blaze-html, crypto-api, hamlet, clientsession, + crypto-api, hamlet, clientsession, template-haskell, yesod-default (>= 1.1.0), data-default CPP-Options: -DWITH_WEBAPP @@ -91,13 +96,21 @@ Executable git-annex Build-Depends: network-multicast, network-info CPP-Options: -DWITH_PAIRING + if flag(XMPP) && flag(Assistant) + Build-Depends: network-protocol-xmpp, gnutls (>= 0.1.4), xml-types + CPP-Options: -DWITH_XMPP + + if flag(XMPP) && flag(Assistant) && flag(DNS) + Build-Depends: dns + CPP-Options: -DWITH_DNS + Test-Suite test Type: exitcode-stdio-1.0 Main-Is: test.hs Build-Depends: testpack, HUnit, MissingH, hslogger, directory, filepath, unix, containers, utf8-string, network, mtl, bytestring, old-locale, time, pcre-light, extensible-exceptions, dataenc, SHA, process, json, HTTP, - base == 4.5.*, monad-control, transformers-base, lifted-base, + base (>= 4.5 && < 4.7), monad-control, transformers-base, lifted-base, IfElse, text, QuickCheck >= 2.1, bloomfilter, edit-distance, process Other-Modules: Utility.Touch Include-Dirs: Utility diff --git a/git-annex.hs b/git-annex.hs index f5f2f22..60ed6c1 100644 --- a/git-annex.hs +++ b/git-annex.hs @@ -13,9 +13,9 @@ import qualified GitAnnexShell main :: IO () main = run =<< getProgName - where - run n - | isshell n = go GitAnnexShell.run - | otherwise = go GitAnnex.run - isshell n = takeFileName n == "git-annex-shell" - go a = a =<< getArgs + where + run n + | isshell n = go GitAnnexShell.run + | otherwise = go GitAnnex.run + isshell n = takeFileName n == "git-annex-shell" + go a = a =<< getArgs diff --git a/standalone/linux/runshell b/standalone/linux/runshell index da26cd3..82d3ce2 100755 --- a/standalone/linux/runshell +++ b/standalone/linux/runshell @@ -26,6 +26,19 @@ cd "$base" base="$(pwd)" cd "$orig" +# Install shim that's used to run git-annex-shell from ssh authorized_keys. +# The assistant also does this when run, but the user may not be using the +# assistant. +if [ ! -e "$HOME/.ssh/git-annex-shell" ]; then + mkdir "$HOME/.ssh" >/dev/null 2>&1 || true + ( + echo "#!/bin/sh" + echo "set -e" + echo "exec $base/runshell git-annex-shell -c \"\$SSH_ORIGINAL_COMMAND\"" + ) > "$HOME/.ssh/git-annex-shell" + chmod +x "$HOME/.ssh/git-annex-shell" +fi + # Put our binaries first, to avoid issues with out of date or incompatable # system binaries. PATH=$base/bin:$PATH diff --git a/standalone/osx/git-annex.app/Contents/MacOS/runshell b/standalone/osx/git-annex.app/Contents/MacOS/runshell index 719c85c..c409d81 100755 --- a/standalone/osx/git-annex.app/Contents/MacOS/runshell +++ b/standalone/osx/git-annex.app/Contents/MacOS/runshell @@ -26,6 +26,19 @@ cd "$base" base="$(pwd)" cd "$orig" +# Install shim that's used to run git-annex-shell from ssh authorized_keys. +# The assistant also does this when run, but the user may not be using the +# assistant. +if [ ! 
-e "$HOME/.ssh/git-annex-shell" ]; then + mkdir "$HOME/.ssh" >/dev/null 2>&1 || true + ( + echo "#!/bin/sh" + echo "set -e" + echo "exec $base/runshell git-annex-shell -c \"\$SSH_ORIGINAL_COMMAND\"" + ) > "$HOME/.ssh/git-annex-shell" + chmod +x "$HOME/.ssh/git-annex-shell" +fi + # Put our binaries first, to avoid issues with out of date or incompatable # system binaries. PATH=$base/bin:$PATH diff --git a/templates/configurators/intro.hamlet b/templates/configurators/intro.hamlet index 544b039..f3651c4 100644 --- a/templates/configurators/intro.hamlet +++ b/templates/configurators/intro.hamlet @@ -4,7 +4,7 @@ git-annex is watching over your files in #{reldir}

It will automatically notice changes, and keep files in sync # - $if notenough + $if (null repolist) with repositories elsewhere ...

But no other repositories are set up yet. diff --git a/templates/configurators/main.hamlet b/templates/configurators/main.hamlet index b0fdcc2..20ce9ed 100644 --- a/templates/configurators/main.hamlet +++ b/templates/configurators/main.hamlet @@ -7,3 +7,18 @@

Distribute the files in this repository to other devices, # make backups, and more, by adding repositories. +

+ $if xmppconfigured +

+ + Re-configure jabber account +

+ Your jabber account is set up, and will be used to keep # + in touch with remote devices, and with your friends. + $else +

+ + Configure jabber account +

+ Keep in touch with remote devices, and with your friends, # + by configuring a jabber account. diff --git a/templates/configurators/pairing/disabled.hamlet b/templates/configurators/pairing/disabled.hamlet index c946aac..c924c4b 100644 --- a/templates/configurators/pairing/disabled.hamlet +++ b/templates/configurators/pairing/disabled.hamlet @@ -1,5 +1,5 @@

- Pairing not supported + not supported

- This build of git-annex does not support pairing. Sorry! + This build of git-annex does not support #{pairingtype} pairing. Sorry! diff --git a/templates/configurators/pairing/inprogress.hamlet b/templates/configurators/pairing/inprogress.hamlet deleted file mode 100644 index da783a7..0000000 --- a/templates/configurators/pairing/inprogress.hamlet +++ /dev/null @@ -1,18 +0,0 @@ -

-

- Pairing in progress .. - $if T.null secret -

- You do not need to leave this page open; pairing will finish # - automatically. - $else -

- Now you should either go tell the owner of the computer you want to pair # - with the secret phrase you selected ("#{secret}"), or go enter it into # - the computer you want to pair with. -

- You do not need to leave this page open; pairing will finish automatically # - as soon as the secret phrase is entered into the other computer. -

- If you're not seeing a pair request on the other computer, try moving # - it to the same switch or wireless network as this one. diff --git a/templates/configurators/pairing/local/inprogress.hamlet b/templates/configurators/pairing/local/inprogress.hamlet new file mode 100644 index 0000000..da783a7 --- /dev/null +++ b/templates/configurators/pairing/local/inprogress.hamlet @@ -0,0 +1,18 @@ +

+

+ Pairing in progress .. + $if T.null secret +

+ You do not need to leave this page open; pairing will finish # + automatically. + $else +

+ Now you should either go tell the owner of the computer you want to pair # + with the secret phrase you selected ("#{secret}"), or go enter it into # + the computer you want to pair with. +

+ You do not need to leave this page open; pairing will finish automatically # + as soon as the secret phrase is entered into the other computer. +

+ If you're not seeing a pair request on the other computer, try moving # + it to the same switch or wireless network as this one. diff --git a/templates/configurators/pairing/local/prompt.hamlet b/templates/configurators/pairing/local/prompt.hamlet new file mode 100644 index 0000000..24de62b --- /dev/null +++ b/templates/configurators/pairing/local/prompt.hamlet @@ -0,0 +1,50 @@ +

+

+ Pairing with a local computer +

+ $if start + Pair with a computer on your local network (or VPN), and the # + two git annex repositories will be combined into one, with changes # + kept in sync between them. + $else + Pairing with #{username}@#{hostname} will combine your two git annex # + repositories into one, allowing you to share files. +

+ $if start + For security, enter a secret phrase. This same secret phrase will # + also need to be entered on the computer you're pairing with. # + It will be used to verify you're pairing with the right computer. + $else + $if sameusername + For security, you need to enter the same secret phrase that was # + entered on #{hostname} when the pairing was started. + $else + For security, a secret phrase has been selected, which you need # + to enter here to finish the pairing. If you don't know the # + phrase, go ask #{username} ... + $if badphrase +

+ #{problem} +

+

+
+ ^{form} + ^{authtoken} +
+
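A closing note on the secret phrase used in the pairing templates above: one standard way for both machines to prove they know the phrase, without ever sending it over the network, is to exchange an HMAC of each pairing message keyed with the phrase. The sketch below uses the SHA library this release already depends on; it illustrates the idea only and is not necessarily the exact scheme git-annex's pairing code uses:

    import Data.Digest.Pure.SHA (hmacSha1, showDigest)
    import qualified Data.ByteString.Lazy.Char8 as L

    -- Sign a pairing message with the shared secret phrase; the other
    -- machine recomputes the HMAC and compares, proving it knows the
    -- same phrase without the phrase itself being transmitted.
    sign :: String -> String -> String
    sign secret msg = showDigest $ hmacSha1 (L.pack secret) (L.pack msg)

    verify :: String -> String -> String -> Bool
    verify secret msg sig = sign secret msg == sig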