module Bench( -- sequential benchmarks
              seq
              -- parallel benchmarks
            , par
              -- distributed benchmarks
            , dist
              -- miscellaneous
            , main
            , getAnswer
            ) where

import           Control.Distributed.Process
import qualified Control.Distributed.Process.Backend.SimpleLocalnet as SLN
import           Control.Distributed.Process.Node
import           Prelude                                                    hiding (seq)
import           Network.Transport.TCP
import           System.Environment                                                (getArgs)

import           MasterWorker
import           Utils

-- | A benchmark result: the orbit's vertices paired with the statistics
-- gathered by each master.
type Result = ([Vertex], [MasterStats])
--type Result = String

-- | Project the orbit computation's output into the benchmark 'Result'.
-- Currently the identity; the commented variant returns only the size
-- string instead.
result :: ([Vertex], [MasterStats]) -> Result
result r = r
--result = sz . snd

-- | Extract the size (rendered as a string) from a benchmark 'Result',
-- ignoring the vertex list.
getAnswer :: Result -> String
getAnswer (_, stats) = sz stats
--getAnswer = id

-----------------------------------------------------------------------------
-- benchmarks, parametrised by
-- * list of Generators
-- * size of space N > 0
-- * number of processors P > 0 (per node)
-- * list of Workers (in short node name format 'name@host')

-- | Sequential orbit computation: explores the space generated by
-- @generators n@ from initial vertex 0, with a hash table sized @2 * n@.
seq :: (Vertex -> GenClos) -> Vertex -> Process Result
seq generators n =
    -- '<$>' replaces the '>>= return . result' anti-pattern.
    result <$> orbit (generators n) [0] (Seq (2 * n))

-- | Parallel orbit computation on a single node with @p@ processors
-- (w/ False does not spawn image computation). Table size per worker is
-- @(2 * n) `div` p + 1@.
par :: Bool -> (Vertex -> GenClos) -> Vertex -> Int -> Process Result
par iwp generators n p =
    -- '<$>' replaces the '>>= return . result' anti-pattern.
    result <$>
      orbit (generators n) [0]
        (Par (JustOne (p, ((2 * n) `div` p) + 1, 0, iwp)))

-- | Distributed orbit computation over @workers@ nodes with @p@
-- processors each (w/ False does not spawn image computation). Table
-- size is split evenly across all @w * p@ workers.
dist :: Bool -> (Vertex -> GenClos) -> Vertex -> Int -> [NodeId] -> Process Result
dist iwp generators n p workers =
    -- '<$>' replaces the '>>= return . result' anti-pattern.
    result <$>
      orbit (generators n) [0]
        (Par (Many [(h, p, (2 * n) `div` (w * p) + 1, 0, iwp) | h <- workers]))
  where w = length workers

-- | Render the "size" entry of the main master's statistics, or
-- "false" when there are no statistics or no size was recorded.
sz :: [MasterStats] -> String
sz stats =
    case stats of
        []              -> "false"
        (mainStats : _) ->
            maybe "false"
                  (\s -> "{size," ++ s ++ "}")
                  ("size" `lookup` mainStats)

-- | Choose the parallel benchmark variant from its command-line flag
-- ("True" / "False" for the image-computation option).
select_par_bench :: String -> (Vertex -> GenClos) -> Vertex -> Int -> Process Result
select_par_bench flag =
    case flag of
        "True"  -> par True
        "False" -> par False
        _       -> error "Invalid IWP Flag"

-- | Choose the distributed benchmark variant from its command-line flag
-- ("True" / "False" for the image-computation option).
select_dist_bench :: String -> (Vertex -> GenClos) -> Vertex -> Int -> [NodeId] -> Process Result
select_dist_bench flag =
    case flag of
        "True"  -> dist True
        "False" -> dist False
        _       -> error "Invalid IWP Flag"

-- | Map a benchmark version name to its generator closure and the size
-- of the space to explore.
bench_args :: String -> (Vertex -> GenClos, Int)
bench_args version =
    case version of
        "short"        -> (gg13, 15000)
        "intermediate" -> (gg124, 40000)
        "long"         -> (gg1245, 60000)
        _              -> error "Invalid Version"

-- | Command-line driver: dispatches on the first argument to run the
-- sequential, parallel, or distributed orbit benchmark, or prints usage.
main :: IO ()
main = do
    args <- getArgs
    case args of
        -- Sequential Orbit
        ["seq", version, host, port] -> do
            let (gnrt, n) = bench_args version
            t <- createTransportOrFail host port
            node <- newLocalNode t rtable
            runProcess node $ do
                res <- seq gnrt n
                liftIO $ print res
        -- Parallel Orbit
        ["par", iwp, version, w, host, port] -> do
            let (gnrt, n) = bench_args version
            t <- createTransportOrFail host port
            node <- newLocalNode t rtable
            runProcess node $ do
                let bench = select_par_bench iwp
                res <- bench gnrt n (read w :: Int)
                liftIO $ print res
        -- Distributed Orbit, master side: discovers slaves, runs the
        -- benchmark across them, then shuts them down.
        ["dist", "master", iwp, version, w, host, port] -> do
            let (gnrt, n) = bench_args version
            b <- SLN.initializeBackend host port rtable
            -- putStrLn (not print) so status messages are not quoted/escaped.
            putStrLn $ "Starting master @ " ++ host ++ ":" ++ port ++ " with slaves:"
            SLN.startMaster b $ \slaves -> do
                let bench = select_dist_bench iwp
                liftIO $ putStrLn $ "  " ++ show slaves
                res <- bench gnrt n (read w :: Int) slaves
                SLN.terminateAllSlaves b
                liftIO $ print res
        -- Distributed Orbit, slave side: waits to be driven by a master.
        ["dist", "slave", host, port] -> do
            b <- SLN.initializeBackend host port rtable
            putStrLn $ "Starting slave @ " ++ host ++ ":" ++ port
            SLN.startSlave b
        -- Invalid configuration
        _ -> do
            putStrLn "Usage:"
            putStrLn "  Sequential Version"
            putStrLn "    ./orbit seq [short|intermediate|long] host port"
            putStrLn "  Parallel Version"
            putStrLn "    ./orbit par [True|False] [short|intermediate|long] nWorkers host port"
            putStrLn "  Distributed Version"
            putStrLn "  - Master Node:"
            putStrLn "    ./orbit dist master [True|False] [short|intermediate|long] nWorkers host port"
            putStrLn "  - Slave Node:"
            putStrLn "    ./orbit dist slave host port"
  where
    -- Remote table registering MasterWorker's remotable closures.
    rtable :: RemoteTable
    rtable = MasterWorker.__remoteTable initRemoteTable

    -- Create a TCP transport, failing with a readable message instead of
    -- the bare pattern-match failure a 'Right t <- ...' bind would give.
    createTransportOrFail host port =
        createTransport host port defaultTCPParameters
            >>= either (fail . show) return

{-
main :: IO ()
main = do
    Right t1 <- createTransport "127.0.0.1" "5050" defaultTCPParameters
    node1 <- newLocalNode t1 rtable

    Right t2 <- createTransport "127.0.0.1" "5051" defaultTCPParameters
    node2 <- newLocalNode t2 rtable

    Right t3 <- createTransport "127.0.0.1" "5052" defaultTCPParameters
    node3 <- newLocalNode t3 rtable

    runProcess node1 $ do
        res <- par_seq gg1245 2512 32 --[localNodeId node1, localNodeId node2, localNodeId node3]
        liftIO $ print res
  where rtable :: RemoteTable
        rtable = MasterWorker.__remoteTable initRemoteTable
-}

{-
import qualified Control.Distributed.Process.Backend.SimpleLocalnet as SLN
import           System.Environment                                        (getArgs)

    args <- getArgs

    case args of
        ["master", host, port] -> do
            b <- SLN.initializeBackend host port rtable
            print $ "Starting master @ " ++ host ++ ":" ++ port ++ " with slaves:"
            SLN.startMaster b $ \slaves -> do
                liftIO $ print $ "  " ++ show slaves
                res <- dist gg13 11 2 slaves
                liftIO $ print res
        ["slave", host, port] -> do
            b <- SLN.initializeBackend host port rtable
            print $ "Starting slave @ " ++ host ++ ":" ++ port
            SLN.startSlave b
-}
    -- 1 second wait. Otherwise the main thread can terminate before
    -- our messages reach the logging process or get flushed to stdio
    --threadDelay (1 * 1000000)