*** Running test-and-or
(command.AndOr
  children: [
    (command.Simple blame_tok:<ls> more_env:[] words:[{<ls>}] redirects:[] do_fork:T)
    (command.Simple
      blame_tok: <echo>
      more_env: []
      words: [{<echo>} {(DQ ($ Id.VSub_At '@'))}]
      redirects: []
      do_fork: T
    )
    (command.Simple
      blame_tok: <die>
      more_env: []
      words: [{<die>} {(DQ <foo>)}]
      redirects: []
      do_fork: T
    )
  ]
  ops: [<Id.Op_DAmp _> <Id.Op_DPipe _>]
)
(command.AndOr
  children: [
    (command.Simple blame_tok:<ls> more_env:[] words:[{<ls>}] redirects:[] do_fork:T)
    (command.Simple
      blame_tok: <echo>
      more_env: []
      words: [{<echo>} {(word_part.Splice blame_tok:<Id.Lit_Splice '@ARGV'> var_name:ARGV)}]
      redirects: []
      do_fork: T
    )
    (command.Simple
      blame_tok: <die>
      more_env: []
      words: [{<die>} {(DQ <foo>)}]
      redirects: []
      do_fork: T
    )
  ]
  ops: [<Id.Op_DAmp _> <Id.Op_DPipe _>]
)
ls && echo @ARGV || die "foo"
OK
OK  test-and-or
*** Running test-args-for-loop
(command.ForEach
  keyword: <Id.KW_For for>
  iter_names: [x]
  iterable: (for_iter__Args)
  body: 
    (command.DoGroup
      left: <Id.KW_Do do>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName x)}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.KW_Done done>
    )
  redirects: []
)
(command.ForEach
  keyword: <Id.KW_For for>
  iter_names: [x]
  iterable: 
    (for_iter.Words words:[{(word_part.Splice blame_tok:<Id.Lit_Splice '@ARGV'> var_name:ARGV)}])
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace '{'>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName x)}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
  redirects: []
)
for x in @ARGV {
  echo $x
}

OK
(command.ForEach
  keyword: <Id.KW_For for>
  iter_names: [x]
  iterable: (for_iter__Args)
  body: 
    (command.DoGroup
      left: <Id.KW_Do do>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName x)}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.KW_Done done>
    )
  redirects: []
)
(command.ForEach
  keyword: <Id.KW_For for>
  iter_names: [x]
  iterable: 
    (for_iter.Words words:[{(word_part.Splice blame_tok:<Id.Lit_Splice '@ARGV'> var_name:ARGV)}])
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace '{'>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName x)}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
  redirects: []
)
for x in @ARGV {
  echo $x
}

OK
OK  test-args-for-loop
*** Running test-backticks-TODO
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {
      (CommandSub
        left_token: <Id.Left_Backtick '`'>
        child: 
          (command.Simple
            blame_tok: <echo>
            more_env: []
            words: [{<echo>} {<hi>} {(${ Id.VSub_Name var)}]
            redirects: []
            do_fork: T
          )
        right: <Id.Backtick_Right '`'>
      )
    }
  ]
  redirects: []
  do_fork: T
)
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {
      (CommandSub
        left_token: <Id.Left_DollarParen '$('>
        child: 
          (command.Simple
            blame_tok: <echo>
            more_env: []
            words: [{<echo>} {<hi>} {(${ Id.VSub_Name var)}]
            redirects: []
            do_fork: T
          )
        right: <Id.Eof_RParen _>
      )
    }
  ]
  redirects: []
  do_fork: T
)
echo $(echo hi ${var})
OK
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {
      (CommandSub
        left_token: <Id.Left_DollarParen '$('>
        child: 
          (BraceGroup
            left: <Id.Lit_LBrace '{'>
            children: [
              (command.Sentence
                child: 
                  (command.Simple
                    blame_tok: <echo>
                    more_env: []
                    words: [{<echo>} {<hi>}]
                    redirects: []
                    do_fork: T
                  )
                terminator: <Id.Op_Semi _>
              )
            ]
            redirects: []
            right: <Id.Lit_RBrace '}'>
          )
        right: <Id.Eof_RParen _>
      )
    }
  ]
  redirects: []
  do_fork: T
)
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {
      (CommandSub
        left_token: <Id.Left_DollarParen '$('>
        child: 
          (BraceGroup
            left: <Id.Lit_LBrace '{'>
            children: [
              (command.Sentence
                child: 
                  (command.Simple
                    blame_tok: <echo>
                    more_env: []
                    words: [{<echo>} {<hi>}]
                    redirects: []
                    do_fork: T
                  )
                terminator: <Id.Op_Semi _>
              )
            ]
            redirects: []
            right: <Id.Lit_RBrace '}'>
          )
        right: <Id.Eof_RParen _>
      )
    }
  ]
  redirects: []
  do_fork: T
)
echo $({ echo hi; })
OK
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {
      (CommandSub
        left_token: <Id.Left_Backtick '`'>
        child: 
          (BraceGroup
            left: <Id.Lit_LBrace '{'>
            children: [
              (command.Sentence
                child: 
                  (command.Simple
                    blame_tok: <echo>
                    more_env: []
                    words: [{<echo>} {<hi>}]
                    redirects: []
                    do_fork: T
                  )
                terminator: <Id.Op_Semi _>
              )
            ]
            redirects: []
            right: <Id.Lit_RBrace '}'>
          )
        right: <Id.Backtick_Right '`'>
      )
    }
  ]
  redirects: []
  do_fork: T
)
echo $(do { echo hi)
OK
OK  test-backticks-TODO
*** Running test-bare-assign-TODO
(command.ShAssignment
  left: <Id.Lit_VarLike 'a='>
  pairs: [
    (AssignPair
      left: <Id.Lit_VarLike 'a='>
      lhs: (sh_lhs.Name left:<Id.Lit_VarLike 'a='> name:a)
      op: assign_op.Equal
      rhs: (rhs_word__Empty)
    )
  ]
  redirects: []
)
(command.Mutation
  keyword: <Id.KW_SetVar setvar>
  lhs: [(y_lhs.Var name:<Id.Expr_Name a>)]
  op: <Id.Arith_Equal _>
  rhs: (SQ )
)

setvar a = ''

OK
(command.ShAssignment
  left: <Id.Lit_VarLike 'a='>
  pairs: [
    (AssignPair
      left: <Id.Lit_VarLike 'a='>
      lhs: (sh_lhs.Name left:<Id.Lit_VarLike 'a='> name:a)
      op: assign_op.Equal
      rhs: {<b>}
    )
  ]
  redirects: []
)
(command.Mutation
  keyword: <Id.KW_SetVar setvar>
  lhs: [(y_lhs.Var name:<Id.Expr_Name a>)]
  op: <Id.Arith_Equal _>
  rhs: (SQ <b>)
)

setvar a = 'b'

OK
(command.ShAssignment
  left: <Id.Lit_VarLike 'a='>
  pairs: [
    (AssignPair
      left: <Id.Lit_VarLike 'a='>
      lhs: (sh_lhs.Name left:<Id.Lit_VarLike 'a='> name:a)
      op: assign_op.Equal
      rhs: 
        {
          (CommandSub
            left_token: <Id.Left_DollarParen '$('>
            child: 
              (command.Simple
                blame_tok: <hostname>
                more_env: []
                words: [{<hostname>}]
                redirects: []
                do_fork: T
              )
            right: <Id.Eof_RParen _>
          )
        }
    )
  ]
  redirects: []
)
(command.Mutation
  keyword: <Id.KW_SetVar setvar>
  lhs: [(y_lhs.Var name:<Id.Expr_Name a>)]
  op: <Id.Arith_Equal _>
  rhs: 
    (CommandSub
      left_token: <Id.Left_DollarParen '$('>
      child: 
        (command.Simple
          blame_tok: <hostname>
          more_env: []
          words: [{<hostname>}]
          redirects: []
          do_fork: T
        )
      right: <Id.Eof_RParen _>
    )
)

setvar a = $(hostname)

OK
(command.ShAssignment
  left: <Id.Lit_VarLike 'a='>
  pairs: [
    (AssignPair
      left: <Id.Lit_VarLike 'a='>
      lhs: (sh_lhs.Name left:<Id.Lit_VarLike 'a='> name:a)
      op: assign_op.Equal
      rhs: 
        {
          (BracedVarSub
            left: <Id.Left_DollarBrace '${'>
            token: <Id.VSub_Name PATH>
            var_name: PATH
            suffix_op: (suffix_op.Unary op:<Id.VTest_ColonHyphen _> arg_word:{})
            right: <Id.Right_DollarBrace '}'>
          )
        }
    )
  ]
  redirects: []
)
(command.Mutation
  keyword: <Id.KW_SetVar setvar>
  lhs: [(y_lhs.Var name:<Id.Expr_Name a>)]
  op: <Id.Arith_Equal _>
  rhs: 
    (BracedVarSub
      left: <Id.Left_DollarBrace '${'>
      token: <Id.VSub_Name PATH>
      var_name: PATH
      suffix_op: (suffix_op.Unary op:<Id.VTest_ColonHyphen _> arg_word:{})
      right: <Id.Right_DollarBrace '}'>
    )
)

setvar a = ${PATH:-}

OK
OK  test-bare-assign-TODO
*** Running test-brace-group
(BraceGroup
  left: <Id.Lit_LBrace '{'>
  children: [
    (command.Sentence
      child: 
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<hi>}]
          redirects: []
          do_fork: T
        )
      terminator: <Id.Op_Semi _>
    )
  ]
  redirects: []
  right: <Id.Lit_RBrace '}'>
)
do { echo hi; }
OK
(BraceGroup
  left: <Id.Lit_LBrace '{'>
  children: [
    (command.Sentence
      child: 
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<hi>}]
          redirects: []
          do_fork: T
        )
      terminator: <Id.Op_Semi _>
    )
    (command.Sentence
      child: 
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<bye>}]
          redirects: []
          do_fork: T
        )
      terminator: <Id.Op_Semi _>
    )
  ]
  redirects: []
  right: <Id.Lit_RBrace '}'>
)
do { echo hi; echo bye; }
OK
OK  test-brace-group
*** Running test-bracket-builtin
(command.AndOr
  children: [
    (command.Simple
      blame_tok: <Id.Lit_LBracket '['>
      more_env: []
      words: [
        {<Id.Lit_LBracket '['>}
        {<Id.KW_Bang '!'>}
        {<-z>}
        {(DQ ($ Id.VSub_DollarName foo))}
        {<Id.Lit_RBracket ']'>}
      ]
      redirects: []
      do_fork: T
    )
    (command.Simple blame_tok:<die> more_env:[] words:[{<die>}] redirects:[] do_fork:T)
  ]
  ops: [<Id.Op_DPipe _>]
)
(command.AndOr
  children: [
    (command.Simple
      blame_tok: <test>
      more_env: []
      words: [{<test>} {<Id.KW_Bang '!'>} {<-z>} {($ Id.VSub_DollarName foo)}]
      redirects: []
      do_fork: T
    )
    (command.Simple blame_tok:<die> more_env:[] words:[{<die>}] redirects:[] do_fork:T)
  ]
  ops: [<Id.Op_DPipe _>]
)
test ! -z $foo || die
OK
(command.If
  if_kw: <Id.KW_If if>
  arms: [
    (IfArm
      keyword: <Id.KW_If if>
      cond: 
        (condition.Shell
          commands: [
            (command.Sentence
              child: 
                (command.Simple
                  blame_tok: <Id.Lit_LBracket '['>
                  more_env: []
                  words: [
                    {<Id.Lit_LBracket '['>}
                    {(DQ ($ Id.VSub_DollarName foo))}
                    {<-eq>}
                    {<3>}
                    {<Id.Lit_RBracket ']'>}
                  ]
                  redirects: []
                  do_fork: T
                )
              terminator: <Id.Op_Semi _>
            )
          ]
        )
      then_kw: <Id.KW_Then then>
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<yes>}]
          redirects: []
          do_fork: T
        )
      ]
      spids: [1 16]
    )
  ]
  else_action: []
  fi_kw: <Id.KW_Fi fi>
  redirects: []
)
(command.If
  if_kw: <Id.KW_If if>
  arms: [
    (IfArm
      keyword: <Id.KW_If if>
      cond: 
        (condition.Shell
          commands: [
            (command.Simple
              blame_tok: <test>
              more_env: []
              words: [{<test>} {($ Id.VSub_DollarName foo)} {<-eq>} {<3>}]
              redirects: []
              do_fork: T
            )
          ]
        )
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<yes>}]
          redirects: []
          do_fork: T
        )
      ]
      spids: [1]
    )
  ]
  else_action: []
  redirects: []
)

if test $foo -eq 3 {
  echo yes
}
OK
OK  test-bracket-builtin
*** Running test-case
(command.Case
  case_kw: <Id.KW_Case case>
  to_match: (case_arg.Word w:{($ Id.VSub_DollarName var)})
  arms_start: <Id.KW_In in>
  arms: [
    (CaseArm
      left: <foo>
      pattern: (pat.Words words:[{<foo>} {<bar>}])
      middle: <Id.Right_CasePat _>
      action: [
        (command.AndOr
          children: [
            (command.Simple
              blame_tok: <Id.Lit_LBracket '['>
              more_env: []
              words: [{<Id.Lit_LBracket '['>} {<-f>} {<foo>} {<Id.Lit_RBracket ']'>}]
              redirects: []
              do_fork: T
            )
            (command.Simple
              blame_tok: <echo>
              more_env: []
              words: [{<echo>} {<file>}]
              redirects: []
              do_fork: T
            )
          ]
          ops: [<Id.Op_DAmp _>]
        )
      ]
      right: <Id.Op_DSemi _>
    )
    (CaseArm
      left: <Id.Left_DoubleQuote '"'>
      pattern: (pat.Words words:[{(DQ )}])
      middle: <Id.Right_CasePat _>
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<empty>}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.Op_DSemi _>
    )
    (CaseArm
      left: <Id.Lit_Star '*'>
      pattern: (pat.Words words:[{<Id.Lit_Star '*'>}])
      middle: <Id.Right_CasePat _>
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<default>}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.Op_DSemi _>
    )
  ]
  arms_end: <Id.KW_Esac esac>
  redirects: []
)
(command.Case
  case_kw: <Id.KW_Case case>
  to_match: (case_arg.YshExpr e:(expr.Var name:<Id.Expr_Name var>))
  arms_start: <Id.Lit_LBrace '{'>
  arms: [
    (CaseArm
      left: <foo>
      pattern: (pat.Words words:[{<foo>} {<bar>}])
      middle: <Id.Lit_LBrace '{'>
      action: [
        (command.AndOr
          children: [
            (command.Simple
              blame_tok: <test>
              more_env: []
              words: [{<test>} {<-f>} {<foo>}]
              redirects: []
              do_fork: T
            )
            (command.Simple
              blame_tok: <echo>
              more_env: []
              words: [{<echo>} {<file>}]
              redirects: []
              do_fork: T
            )
          ]
          ops: [<Id.Op_DAmp _>]
        )
      ]
      right: <Id.Lit_RBrace '}'>
    )
    (CaseArm
      left: <Id.Left_DoubleQuote '"'>
      pattern: (pat.Words words:[{(DQ )}])
      middle: <Id.Lit_LBrace '{'>
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<empty>}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.Lit_RBrace '}'>
    )
    (CaseArm
      left: <Id.Lit_Star '*'>
      pattern: (pat.Words words:[{<Id.Lit_Star '*'>}])
      middle: <Id.Lit_LBrace '{'>
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<default>}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.Lit_RBrace '}'>
    )
  ]
  arms_end: <Id.Lit_RBrace _>
  redirects: []
)

case (var) {
  foo|bar {
    test -f foo && echo file
    }
  "" {
    echo empty
    }
  * {
    echo default
    }
}

OK
(command.Case
  case_kw: <Id.KW_Case case>
  to_match: (case_arg.Word w:{(DQ ($ Id.VSub_DollarName var))})
  arms_start: <Id.KW_In in>
  arms: [
    (CaseArm
      left: <Id.Lit_Star '*'>
      pattern: (pat.Words words:[{<Id.Lit_Star '*'>}])
      middle: <Id.Right_CasePat _>
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<foo>}]
          redirects: []
          do_fork: T
        )
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<bar>}]
          redirects: []
          do_fork: T
        )
      ]
    )
  ]
  arms_end: <Id.KW_Esac esac>
  redirects: []
)
(command.Case
  case_kw: <Id.KW_Case case>
  to_match: (case_arg.YshExpr e:(expr.Var name:<Id.Expr_Name var>))
  arms_start: <Id.Lit_LBrace '{'>
  arms: [
    (CaseArm
      left: <Id.Lit_Star '*'>
      pattern: (pat.Words words:[{<Id.Lit_Star '*'>}])
      middle: <Id.Lit_LBrace '{'>
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<foo>}]
          redirects: []
          do_fork: T
        )
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<bar>}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.Lit_RBrace '}'>
    )
  ]
  arms_end: <Id.Lit_RBrace _>
  redirects: []
)

case (var) {
  * {
    echo foo
    echo bar  # no dsemi
}
}

OK
OK  test-case
*** Running test-command-sub
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {
      (CommandSub
        left_token: <Id.Left_DollarParen '$('>
        child: 
          (command.Simple
            blame_tok: <echo>
            more_env: []
            words: [{<echo>} {<hi>}]
            redirects: []
            do_fork: T
          )
        right: <Id.Eof_RParen _>
      )
    }
  ]
  redirects: []
  do_fork: T
)
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {
      (CommandSub
        left_token: <Id.Left_DollarParen '$('>
        child: 
          (command.Simple
            blame_tok: <echo>
            more_env: []
            words: [{<echo>} {<hi>}]
            redirects: []
            do_fork: T
          )
        right: <Id.Eof_RParen _>
      )
    }
  ]
  redirects: []
  do_fork: T
)
echo $(echo hi)
OK
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {
      (DQ <__> 
        (CommandSub
          left_token: <Id.Left_DollarParen '$('>
          child: 
            (command.Simple
              blame_tok: <echo>
              more_env: []
              words: [{<echo>} {<hi>}]
              redirects: []
              do_fork: T
            )
          right: <Id.Eof_RParen _>
        ) <__>
      )
    }
  ]
  redirects: []
  do_fork: T
)
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {
      (DQ <__> 
        (CommandSub
          left_token: <Id.Left_DollarParen '$('>
          child: 
            (command.Simple
              blame_tok: <echo>
              more_env: []
              words: [{<echo>} {<hi>}]
              redirects: []
              do_fork: T
            )
          right: <Id.Eof_RParen _>
        ) <__>
      )
    }
  ]
  redirects: []
  do_fork: T
)
echo "__$(echo hi)__"
OK
OK  test-command-sub
*** Running test-dollar-at
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [{<echo>} {<one>} {(DQ ($ Id.VSub_At '@'))} {<two>}]
  redirects: []
  do_fork: T
)
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [{<echo>} {<one>} {(word_part.Splice blame_tok:<Id.Lit_Splice '@ARGV'> var_name:ARGV)} {<two>}]
  redirects: []
  do_fork: T
)
echo one @ARGV two
OK
OK  test-dollar-at
*** Running test-empty-for-loop
(command.ForEach
  keyword: <Id.KW_For for>
  iter_names: [x]
  iterable: (for_iter.Words words:[])
  body: 
    (command.DoGroup
      left: <Id.KW_Do do>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName x)}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.KW_Done done>
    )
  redirects: []
)
(command.ForEach
  keyword: <Id.KW_For for>
  iter_names: [x]
  iterable: (for_iter.Words words:[])
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace '{'>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName x)}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
  redirects: []
)

for x in
{
  echo $x
}

OK
OK  test-empty-for-loop
*** Running test-for-loop
(command.ForEach
  keyword: <Id.KW_For for>
  iter_names: [x]
  iterable: (for_iter.Words words:[{<a>} {<b>} {<c>} {<d>} {<e>} {<f>}])
  semi_tok: <Id.Op_Semi _>
  body: 
    (command.DoGroup
      left: <Id.KW_Do do>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName x)}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.KW_Done done>
    )
  redirects: []
)
(command.ForEach
  keyword: <Id.KW_For for>
  iter_names: [x]
  iterable: (for_iter.Words words:[{<a>} {<b>} {<c>} {<d>} {<e>} {<f>}])
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace '{'>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName x)}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
  redirects: []
)

for x in a b c \
  d e f {
  echo $x
}

OK
(command.ForEach
  keyword: <Id.KW_For for>
  iter_names: [x]
  iterable: (for_iter.Words words:[{<a>} {<b>} {<c>} {<d>} {<e>} {<f>}])
  body: 
    (command.DoGroup
      left: <Id.KW_Do do>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName x)}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.KW_Done done>
    )
  redirects: []
)
(command.ForEach
  keyword: <Id.KW_For for>
  iter_names: [x]
  iterable: (for_iter.Words words:[{<a>} {<b>} {<c>} {<d>} {<e>} {<f>}])
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace '{'>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName x)}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
  redirects: []
)

for x in a b c \
  d e f
{
  echo $x
}

OK
OK  test-for-loop
*** Running test-here-doc
(command.Simple
  blame_tok: <cat>
  more_env: []
  words: [{<cat>}]
  redirects: [
    (Redir
      op: <Id.Redir_DLess '<<'>
      loc: (redir_loc.Fd fd:0)
      arg: 
        (redir_param.HereDoc
          here_begin: {<EOF>}
          here_end_tok: <Id.Undefined_Tok ''>
          stdin_parts: [<'hi\n'>]
        )
    )
  ]
  do_fork: T
)
(command.Simple
  blame_tok: <cat>
  more_env: []
  words: [{<cat>}]
  redirects: [
    (Redir
      op: <Id.Redir_TLess '<<<'>
      loc: (redir_loc.Fd fd:0)
      arg: 
        {
          (DoubleQuoted
            left: <Id.Left_TDoubleQuote '"'>
            parts: [<'hi\n'>]
            right: <Id.Right_DoubleQuote '"'>
          )
        }
    )
  ]
  do_fork: T
)

cat <<< """
hi
"""

OK
(command.Simple
  blame_tok: <cat>
  more_env: []
  words: [{<cat>}]
  redirects: [
    (Redir
      op: <Id.Redir_DLess '<<'>
      loc: (redir_loc.Fd fd:0)
      arg: 
        (redir_param.HereDoc
          here_begin: {(SQ <EOF>)}
          here_end_tok: <Id.Undefined_Tok ''>
          stdin_parts: [<'hi\n'>]
        )
    )
  ]
  do_fork: T
)
(command.Simple
  blame_tok: <cat>
  more_env: []
  words: [{<cat>}]
  redirects: [
    (Redir
      op: <Id.Redir_TLess '<<<'>
      loc: (redir_loc.Fd fd:0)
      arg: 
        {
          (SingleQuoted
            left: <Id.Left_TSingleQuote '\''>
            tokens: [<'hi\n'>]
            right: <Id.Right_SingleQuote '\''>
          )
        }
    )
  ]
  do_fork: T
)

cat <<< '''
hi
'''

OK
OK  test-here-doc
*** Running test-if
(command.If
  if_kw: <Id.KW_If if>
  arms: [
    (IfArm
      keyword: <Id.KW_If if>
      cond: 
        (condition.Shell
          commands: [
            (command.Sentence
              child: 
                (command.Simple
                  blame_tok: <true>
                  more_env: []
                  words: [{<true>}]
                  redirects: []
                  do_fork: T
                )
              terminator: <Id.Op_Semi _>
            )
          ]
        )
      then_kw: <Id.KW_Then then>
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<yes>}]
          redirects: []
          do_fork: T
        )
      ]
      spids: [1 6]
    )
  ]
  else_action: []
  fi_kw: <Id.KW_Fi fi>
  redirects: []
)
(command.If
  if_kw: <Id.KW_If if>
  arms: [
    (IfArm
      keyword: <Id.KW_If if>
      cond: 
        (condition.Shell
          commands: [
            (command.Simple
              blame_tok: <true>
              more_env: []
              words: [{<true>}]
              redirects: []
              do_fork: T
            )
          ]
        )
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<yes>}]
          redirects: []
          do_fork: T
        )
      ]
      spids: [1]
    )
  ]
  else_action: []
  redirects: []
)

if true {
  echo yes
}
OK
(command.If
  if_kw: <Id.KW_If if>
  arms: [
    (IfArm
      keyword: <Id.KW_If if>
      cond: 
        (condition.Shell
          commands: [
            (command.Sentence
              child: 
                (command.Simple
                  blame_tok: <true>
                  more_env: []
                  words: [{<true>}]
                  redirects: []
                  do_fork: T
                )
              terminator: <Id.Op_Semi _>
            )
          ]
        )
      then_kw: <Id.KW_Then then>
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<yes>}]
          redirects: []
          do_fork: T
        )
      ]
      spids: [1 6]
    )
    (IfArm
      keyword: <Id.KW_Elif elif>
      cond: 
        (condition.Shell
          commands: [
            (command.Sentence
              child: 
                (command.Simple
                  blame_tok: <false>
                  more_env: []
                  words: [{<false>}]
                  redirects: []
                  do_fork: T
                )
              terminator: <Id.Op_Semi _>
            )
          ]
        )
      then_kw: <Id.KW_Then then>
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<Id.KW_Elif elif>}]
          redirects: []
          do_fork: T
        )
      ]
      spids: [13 18]
    )
    (IfArm
      keyword: <Id.KW_Elif elif>
      cond: 
        (condition.Shell
          commands: [
            (command.Sentence
              child: 
                (command.Simple
                  blame_tok: <spam>
                  more_env: []
                  words: [{<spam>}]
                  redirects: []
                  do_fork: T
                )
              terminator: <Id.Op_Semi _>
            )
          ]
        )
      then_kw: <Id.KW_Then then>
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<Id.KW_Elif elif>}]
          redirects: []
          do_fork: T
        )
      ]
      spids: [25 30]
    )
  ]
  else_kw: <Id.KW_Else else>
  else_action: [
    (command.Simple blame_tok:<echo> more_env:[] words:[{<echo>} {<no>}] redirects:[] do_fork:T)
  ]
  fi_kw: <Id.KW_Fi fi>
  redirects: []
)
(command.If
  if_kw: <Id.KW_If if>
  arms: [
    (IfArm
      keyword: <Id.KW_If if>
      cond: 
        (condition.Shell
          commands: [
            (command.Simple
              blame_tok: <true>
              more_env: []
              words: [{<true>}]
              redirects: []
              do_fork: T
            )
          ]
        )
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<yes>}]
          redirects: []
          do_fork: T
        )
      ]
      spids: [1]
    )
    (IfArm
      keyword: <Id.KW_Elif elif>
      cond: 
        (condition.Shell
          commands: [
            (command.Simple
              blame_tok: <false>
              more_env: []
              words: [{<false>}]
              redirects: []
              do_fork: T
            )
          ]
        )
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<Id.KW_Elif elif>}]
          redirects: []
          do_fork: T
        )
      ]
      spids: [14]
    )
    (IfArm
      keyword: <Id.KW_Elif elif>
      cond: 
        (condition.Shell
          commands: [
            (command.Simple
              blame_tok: <spam>
              more_env: []
              words: [{<spam>}]
              redirects: []
              do_fork: T
            )
          ]
        )
      action: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<Id.KW_Elif elif>}]
          redirects: []
          do_fork: T
        )
      ]
      spids: [27]
    )
  ]
  else_action: [
    (command.Simple blame_tok:<echo> more_env:[] words:[{<echo>} {<no>}] redirects:[] do_fork:T)
  ]
  redirects: []
)

if true {
  echo yes
} elif false {
  echo elif
} elif spam {
  echo elif
} else {
  echo no
}
OK
OK  test-if
*** Running test-ksh-func
(command.ShFunction
  keyword: <Id.KW_Function function>
  name_tok: <func1>
  name: func1
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace '{'>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<func1>}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
)
(Proc
  keyword: <Id.KW_Proc proc>
  name: <func1>
  sig: (proc_sig__Open)
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace _>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<func1>}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
)

proc func1 {  # no parens
  echo func1
}
OK
OK  test-ksh-func
*** Running test-line-breaks
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [{<echo>} {<one>} {<two>} {<three>} {<four>}]
  redirects: []
  do_fork: T
)
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [{<echo>} {<one>} {<two>} {<three>} {<four>}]
  redirects: []
  do_fork: T
)

echo one \
  two three \
  four

OK
OK  test-line-breaks
*** Running test-posix-func
(command.ShFunction
  name_tok: <f>
  name: f
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace '{'>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {(DQ <hi>)}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
)
(Proc
  keyword: <Id.KW_Proc proc>
  name: <f>
  sig: (proc_sig__Open)
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace _>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {(DQ <hi>)}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
)

  proc f {
    echo "hi"
  }
OK
(command.ShFunction
  name_tok: <f>
  name: f
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace '{'>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {(DQ <hi>)}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
)
(Proc
  keyword: <Id.KW_Proc proc>
  name: <f>
  sig: (proc_sig__Open)
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace _>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {(DQ <hi>)}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
)

  proc f {
    echo "hi"
  }
OK
OK  test-posix-func
*** Running test-simple-command
(command.Simple blame_tok:<echo> more_env:[] words:[{<echo>} {<hi>}] redirects:[] do_fork:T)
(command.Simple blame_tok:<echo> more_env:[] words:[{<echo>} {<hi>}] redirects:[] do_fork:T)
echo hi
OK
OK  test-simple-command
*** Running test-source-builtin
(command.Simple blame_tok:<.> more_env:[] words:[{<.>} {<lib.sh>}] redirects:[] do_fork:T)
(command.Simple blame_tok:<source> more_env:[] words:[{<source>} {<lib.sh>}] redirects:[] do_fork:T)
source lib.sh
OK
(command.AndOr
  children: [
    (command.Simple
      blame_tok: <Id.Lit_LBracket '['>
      more_env: []
      words: [{<Id.Lit_LBracket '['>} {<-f>} {<lib.sh>} {<Id.Lit_RBracket ']'>}]
      redirects: []
      do_fork: T
    )
    (command.Simple blame_tok:<.> more_env:[] words:[{<.>} {<lib.sh>}] redirects:[] do_fork:T)
  ]
  ops: [<Id.Op_DAmp _>]
)
(command.AndOr
  children: [
    (command.Simple
      blame_tok: <test>
      more_env: []
      words: [{<test>} {<-f>} {<lib.sh>}]
      redirects: []
      do_fork: T
    )
    (command.Simple
      blame_tok: <source>
      more_env: []
      words: [{<source>} {<lib.sh>}]
      redirects: []
      do_fork: T
    )
  ]
  ops: [<Id.Op_DAmp _>]
)
test -f lib.sh && source lib.sh
OK
OK  test-source-builtin
*** Running test-subshell
(command.Subshell
  left: <Id.Op_LParen _>
  child: 
    (command.Sentence
      child: 
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<hi>}]
          redirects: []
          do_fork: T
        )
      terminator: <Id.Op_Semi _>
    )
  right: <Id.Right_Subshell _>
  redirects: []
)
shell {echo hi;}
OK
(command.Subshell
  left: <Id.Op_LParen _>
  child: (command.Simple blame_tok:<echo> more_env:[] words:[{<echo>} {<hi>}] redirects:[] do_fork:T)
  right: <Id.Right_Subshell _>
  redirects: []
)
shell {echo hi}
OK
(command.Subshell
  left: <Id.Op_LParen _>
  child: 
    (command.CommandList
      children: [
        (command.Sentence
          child: 
            (command.Simple
              blame_tok: <echo>
              more_env: []
              words: [{<echo>} {<hi>}]
              redirects: []
              do_fork: T
            )
          terminator: <Id.Op_Semi _>
        )
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {<bye>}]
          redirects: []
          do_fork: T
        )
      ]
    )
  right: <Id.Right_Subshell _>
  redirects: []
)
shell {echo hi; echo bye}
OK
(command.Subshell
  left: <Id.Op_LParen _>
  child: 
    (command.Subshell
      left: <Id.Op_LParen _>
      child: 
        (command.CommandList
          children: [
            (command.Sentence
              child: 
                (command.Simple
                  blame_tok: <echo>
                  more_env: []
                  words: [{<echo>} {<hi>}]
                  redirects: []
                  do_fork: T
                )
              terminator: <Id.Op_Semi _>
            )
            (command.Simple
              blame_tok: <echo>
              more_env: []
              words: [{<echo>} {<bye>}]
              redirects: []
              do_fork: T
            )
          ]
        )
      right: <Id.Right_Subshell _>
      redirects: []
    )
  right: <Id.Right_Subshell _>
  redirects: []
)
shell { shell {echo hi; echo bye } }
OK
OK  test-subshell
*** Running test-unquote-subs-TODO
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [{<echo>} {(DQ ($ Id.VSub_Number 1))} {(DQ ($ Id.VSub_DollarName foo))}]
  redirects: []
  do_fork: T
)
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [{<echo>} {($ Id.VSub_Number 1)} {($ Id.VSub_DollarName foo)}]
  redirects: []
  do_fork: T
)
echo $1 $foo
OK
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {
      (DQ 
        (CommandSub
          left_token: <Id.Left_DollarParen '$('>
          child: 
            (command.Simple
              blame_tok: <echo>
              more_env: []
              words: [{<echo>} {<hi>}]
              redirects: []
              do_fork: T
            )
          right: <Id.Eof_RParen _>
        )
      )
    }
  ]
  redirects: []
  do_fork: T
)
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {
      (CommandSub
        left_token: <Id.Left_DollarParen '$('>
        child: 
          (command.Simple
            blame_tok: <echo>
            more_env: []
            words: [{<echo>} {<hi>}]
            redirects: []
            do_fork: T
          )
        right: <Id.Eof_RParen _>
      )
    }
  ]
  redirects: []
  do_fork: T
)
echo $(echo hi)
OK
OK  test-unquote-subs-TODO
*** Running test-var-sub
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [{<echo>} {($ Id.VSub_DollarName foo)}]
  redirects: []
  do_fork: T
)
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [{<echo>} {($ Id.VSub_DollarName foo)}]
  redirects: []
  do_fork: T
)
echo $foo
OK
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {($ Id.VSub_DollarName foo)}
    {(${ Id.VSub_Name bar)}
    {(DQ <__> (${ Id.VSub_Name bar) <__>)}
  ]
  redirects: []
  do_fork: T
)
(command.Simple
  blame_tok: <echo>
  more_env: []
  words: [
    {<echo>}
    {($ Id.VSub_DollarName foo)}
    {(${ Id.VSub_Name bar)}
    {(DQ <__> (${ Id.VSub_Name bar) <__>)}
  ]
  redirects: []
  do_fork: T
)
echo $foo ${bar} "__${bar}__"
OK
OK  test-var-sub
*** Running test-while-loop
(command.WhileUntil
  keyword: <Id.KW_While while>
  cond: 
    (condition.Shell
      commands: [
        (command.Sentence
          child: 
            (command.Simple
              blame_tok: <read>
              more_env: []
              words: [{<read>} {<line>}]
              redirects: []
              do_fork: T
            )
          terminator: <Id.Op_Semi _>
        )
      ]
    )
  body: 
    (command.DoGroup
      left: <Id.KW_Do do>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName line)}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.KW_Done done>
    )
  redirects: []
)
(command.WhileUntil
  keyword: <Id.KW_While while>
  cond: 
    (condition.Shell
      commands: [
        (command.Simple
          blame_tok: <read>
          more_env: []
          words: [{<read>} {<line>}]
          redirects: []
          do_fork: T
        )
      ]
    )
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace '{'>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName line)}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
  redirects: []
)

while read line {
  echo $line
}
OK
(command.WhileUntil
  keyword: <Id.KW_While while>
  cond: 
    (condition.Shell
      commands: [
        (command.Sentence
          child: 
            (command.Simple
              blame_tok: <read>
              more_env: []
              words: [{<read>} {<line>}]
              redirects: []
              do_fork: T
            )
          terminator: <Id.Op_Semi _>
        )
      ]
    )
  body: 
    (command.DoGroup
      left: <Id.KW_Do do>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName line)}]
          redirects: []
          do_fork: T
        )
      ]
      right: <Id.KW_Done done>
    )
  redirects: []
)
(command.WhileUntil
  keyword: <Id.KW_While while>
  cond: 
    (condition.Shell
      commands: [
        (command.Simple
          blame_tok: <read>
          more_env: []
          words: [{<read>} {<line>}]
          redirects: []
          do_fork: T
        )
      ]
    )
  body: 
    (BraceGroup
      left: <Id.Lit_LBrace '{'>
      children: [
        (command.Simple
          blame_tok: <echo>
          more_env: []
          words: [{<echo>} {($ Id.VSub_DollarName line)}]
          redirects: []
          do_fork: T
        )
      ]
      redirects: []
      right: <Id.Lit_RBrace '}'>
    )
  redirects: []
)

while read \
  line {
  echo $line
}
OK
OK  test-while-loop

test/ysh-ify.sh: 22 tests passed.
#!/usr/bin/env bash
#
# Usage:
#   build/cpp.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)

source build/common.sh  # CLANGXX

proc compile-quickly {
  ### For the fast possible development experience

  if test -f $CLANGXX {
    ninja _bin/clang-dbg/oils-for-unix
  } else {
    echo ""
    echo " Error: Unable to locate clang at ($CLANGXX)"
    echo ""
    echo "        To install clang at the specified path, run the following commands:"
    echo ""
    echo "        deps/from-binary.sh download-clang"
    echo "        deps/from-binary.sh extract-clang"
    echo ""
  }
}

proc compiler-trace-build {
  ### Output _build/obj/clang-dbg/*.json files

  local variant=${1:-dbg}'

  # Only clang supports -ftime-trace
  CXXFLAGS='-ftime-trace'' ninja _bin/clang-$variant/oils-for-unix
}

@ARGV
    (DONE build/cpp.sh)
#!/usr/bin/env bash
#
# Usage:
#   build/clean.sh <function name>

set -o nounset
set -o pipefail

# Ignore errors caused by not being root
# set -o errexit

# To test building stdlib.
proc clean-pyc {
  # skip _chroot, _tmp, etc.  But delete __init__.pyc
  find . '(' -type d -a -name '_*' -a -prune ')' -o -name '*.pyc' -a -print |
    xargs --no-run-if-empty -- rm --verbose
}

proc py {
  rm -f --verbose *.so
  rm -r -f --verbose _devbuild _cache

  # These can be stale after renaming things
  clean-pyc
}

proc cpp {
  ### e.g. to time ninja build
  rm -r -f --verbose _bin _build _gen _release _test build.ninja

  clean-pyc

  # _release is for docs
}

proc all {
  rm -r -f --verbose _tmp 
  # TODO: the _deps dir should be obsolete, after removing devtools/release.sh
  # dep-benchmarks

  py
  cpp
}

# This is 'make clean' for the oil.ovm build.
#
# - Take care not to remove _build/oil/bytecode-opy.zip, etc.
# - There are no object files written now.
# - We're not cleaning _build/detect-config.* ?

proc source-tarball-build {
  rm -f -v _bin/oil.{ovm,ovm-dbg}
  # NOTE: delete ovm-opt, ovm-opt.{stripped,symbols}
  rm -f -v \
      _build/oil/{ovm-opt,ovm-dbg} \
      _build/oil/ovm-opt.{stripped,symbols}
}


if test $Argc -eq 0 {
  # clean all if no args
  all
} else {
  @ARGV
}
    (DONE build/clean.sh)
#!/usr/bin/env bash
#
# Usage:
#   build/dev-shell-test.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

source build/dev-shell.sh

proc log {
  echo @ARGV >& 2
}

proc banner {
  echo '  |' 
  echo "  | $[join(ARGV)]"
  echo '  |'
  echo
}

proc show-path {
  local var_name=$1
  echo "$var_name ="
  eval "echo \$$var_name" | sed 's/:/\n/g'
  echo
}

proc test-cli {
  banner "Testing command line"
  show-path PATH

  echo

  log "Testing re2c"
  re2c --help | head -n 2
  echo

  log "Testing cmark"
  echo '*bold*' | doctools/cmark.py
  echo

  log "Testing python3"
  which python3
  python3 -V
  echo
}

proc test-python2 {
  banner "Testing python2"

  # Can't do this because of vendor/typing.py issue.
  # log "Testing oils_for_unix.py"
  # bin/oils_for_unix.py --help | head -n 2

  bin/osh --help | head -n 2
  bin/ysh --help | head -n 2

  echo
}

proc test-python3 {
  banner "Testing python3"
  show-path PYTHONPATH

  log "Checking mycpp"
  mycpp/mycpp_main.py --help | head -n 2
  echo

  log "Checking pexpect"
  spec/stateful/interactive.py --help | head -n 2
  echo
}

proc test-R {
  banner "Testing R"
  show-path R_LIBS_USER

  which R 
  R --version
  echo

  devtools/R-test.sh test-r-packages
  echo
}

proc soil-run {
  test-cli
  test-python2
  test-python3
  test-R
}

@ARGV
    (DONE build/dev-shell-test.sh)
# Usage: source build/py2.sh
#
# Duplicates build/dev-shell.sh, for _bin/shwrap stubs
#
# IMPORTANT: sourced by _build/oils.sh, so it must remain POSIX SHELL

setvar ROOT_WEDGE_DIR = '/wedge/oils-for-unix.org'

# put 'python2' in $PATH
readonly _WEDGE_PY2_DIR=$ROOT_WEDGE_DIR/pkg/python2/2.7.18/bin
if test -d $_WEDGE_PY2_DIR {
  export PATH="$_WEDGE_PY2_DIR:$PATH"
}

    (DONE build/py2.sh)
#!/usr/bin/env bash
#
# Usage:
#   build/cpython-defs.sh <function name>
#
# Example:
#
#   # make clean tree of .c files
#   devtools/release.sh quick-oil-tarball
#   devtools/release.sh test-oil-tar  # can Ctrl-C this
#
#   build/cpython-defs.sh oil-py-names  # extract names
#   build/cpython-defs.sh filter-methods
#
# NOTE: 'build/ovm-compile.sh make-tar' is complex, so it's easier to just extract
# the tarball, even though it leads to a weird dependency.

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)
readonly REPO_ROOT

source build/common.sh  # $PY27
source build/dev-shell.sh  # R_LIBS_USER

readonly BASE_DIR=_tmp/cpython-defs

# Could be published in metrics?
readonly PY_NAMES=_tmp/oil-py-names.txt

# Print the .py files in the tarball in their original locations.  For slimming
# down the build.  Similar to build/metrics.sh linecounts-pydeps.
# Hm that doesn't seem to duplicate posixpath while this does?
proc oil-py-deps {
  cat _build/oil/opy-app-deps.txt | awk ' $1 ~ /\.py$/ { print $1 }'
}

proc oil-py-names {
  time oil-py-deps | xargs bin/opyc lex-names | sort | uniq > $PY_NAMES

  wc -l $PY_NAMES
}

# NOTE: We can replace os with posix.  Will save 700 lines of code, 25K + 25K.
# os.getenv() is a trivial wrapper around os.environ.get().  It gets
# initialized in posixmodule.c.
proc os-module-deps {
  #oil-py-deps | xargs egrep --no-filename -o '\bos\.[a-z]+' */*.py | sort | uniq -c |sort -n
  oil-py-deps | xargs egrep -l '\bos\.'
}

# TODO:
# Write to a separate file like _build/pydefs/intobject.include
# #ifdef OVM_MAIN
# #include "intobject.include"
# #else
# ...
# #end
#
# Should those files be checked in an edited by hand?  Or join them somehow
# with oil-symbols.txt?
# I think this is hard because of METHODS.
# Maybe you should have a config file that controls it.  It takes a .include
# file and then whitelist/blacklist, and then generates a new one.
# could put it in build/pydefs-config.txt
#
# And then reprint the PyMethoDef without docstrings?  It shouldn't be that
# hard to parse.  You can almost do it with a regex, since commas don't appear
# in the string.

proc extract-methods {
  local path_prefix=$1  # to strip
  shift

  local edit_list=$BASE_DIR/method-edit-list.txt

  # NOTE: PyMemberDef is also interesting, but we don't need it for the build.
  gawk -v path_prefix_length=${#path_prefix} -v edit_list=$edit_list '
  /static.*PyMethodDef/ {
    if (printing != 0) {
      printf("%s:%d Expected not to be printing\n", FILENAME, FNR) > "/dev/stderr";
      exit 1;
    }
    # NOTE: We had to adjust stringobject.c and _weakref.c so that the name is
    # on one line!  Not a big deal.
    if (match($0, /static.*PyMethodDef ([a-zA-Z0-9_]+)\[\]/, m)) {
      def_name = m[1];
    } else {
      printf("%s:%d Could not parse declaration name\n",
             FILENAME, FNR) > "/dev/stderr";
      exit 1;
    }
    printing = 1;
    line_begin = FNR;

    rel_path = substr(FILENAME, path_prefix_length + 1);
    if (!found[FILENAME]) {
      # This special line seems to survive the preprocessor?
      printf("\n");
      printf("FILE %s\n", rel_path);
      printf("\n");

      printf("Filtering %s\n", FILENAME) > "/dev/stderr";
      found[FILENAME] = 1  # count number of files that have matches
    }
  }

  printing { print }

  # Looking for closing brace (with leading space)

  /^[:space:]*\}/ && printing {
    # Print the edit list for #ifdef #endif.
    line_end = FNR;
    printf("%s %s %d %d\n", rel_path, def_name, line_begin, line_end) > edit_list;
    printing = 0;
  }

  END {
    for (name in found) {
      num_found++;
    }
    printf("extract-methods.awk: Found definitions in %d out of %d files\n",
           num_found, ARGC) > "/dev/stderr";
  }
  ' @ARGV
}

proc preprocess {
  # TODO: Use PREPROC_FLAGS from build/ovm-compile.sh.
  # - What about stuff in pyconfig.h?
  # - Hack to define WTERMSIG!  We really need to include <sys/wait.h>, but
  # that causes parse errors in cpython_defs.py.  Really we should get rid of
  # this whole hack!
  # - WIFSTOPPED is another likely thing...
  gcc -I $PY27 -E -D OVM_MAIN -D WTERMSIG -
}

readonly TARBALL_ROOT=$(echo _tmp/oil-tar-test/oil-*)

proc extract-all-methods {
  echo '#include "pyconfig.h"'
  # 52 different instances.  Sometimes multiple ones per file.
  find $TARBALL_ROOT -type f -a -name '*.c' \
    | xargs -- $0 extract-methods "$TARBALL_ROOT/"
}

proc cpython-defs '{
  # Annoying: this depends on Oil for 'R' and 'C', then indirectly imports on
  # 'typing' module.
  PYTHONPATH='.:vendor'' build/cpython_defs.py @ARGV
}

proc filter-methods {
  local tmp=$BASE_DIR
  mkdir -p $tmp

  extract-all-methods > $tmp/extracted.txt
  cat $tmp/extracted.txt | preprocess > $tmp/preprocessed.txt

  local out_dir=build/oil-defs
  mkdir -p $out_dir

  #head -n 30 $tmp
  cat $tmp/preprocessed.txt | cpython-defs filter $PY_NAMES $out_dir

  echo
  find $out_dir -name '*.def' | xargs wc -l | sort -n

  echo
  wc -l $tmp/*.txt

  # syntax check
  #cc _tmp/filtered.c
}

proc edit-file {
  local rel_path=$1
  local def_name=$2
  local line_begin=$3
  local line_end=$4

  local def_path="${rel_path}/${def_name}.def"

  local tmp=_tmp/buf.txt

  # DESTRUCTIVE
  mv $rel_path $tmp

  gawk -v def_path=$def_path -v line_begin=$line_begin -v line_end=$line_end '
  NR == line_begin {
    print("#ifdef OVM_MAIN")
    printf("#include \"%s\"\n", def_path)
    print("#else")
    print  # print the PyMethodDef line {
    next
  }
  NR == line_end {
    print  # print the }
    print("#endif"); 
    next
  }
  # All other lines just get printed
  {
    print
  }
  ' $tmp > $rel_path

  echo "Wrote $rel_path"
}

proc edit-all {
  # Reversed so that edits to the same file work!  We are always inserting
  # lines.
  #tac $BASE_DIR/method-edit-list.txt | xargs -n 4 -- $0 edit-file

  # One-off editing
	grep typeobject.c $BASE_DIR/method-edit-list.txt \
    | tac | xargs -n 4 -- $0 edit-file

}

proc extract-types {
  local path_prefix=$1  # to strip
  shift

  local edit_list=$BASE_DIR/type-edit-list.txt

  # NOTE: PyMemberDef is also interesting, but we don't need it for the build.
  gawk -v path_prefix_length=${#path_prefix} -v edit_list=$edit_list '
  function maybe_print_file_header() {
    rel_path = substr(FILENAME, path_prefix_length + 1);
    if (!found[FILENAME]) {
      # This special line seems to survive the preprocessor?
      printf("\n");
      printf("FILE %s\n", rel_path);
      printf("\n");

      printf("Filtering %s\n", FILENAME) > "/dev/stderr";
      found[FILENAME] = 1  # count number of files that have matches
    }
  }

  /PyTypeObject.*=.*\{.*\}/ {
    if (printing != 0) {
      printf("%s:%d Expected not to be printing\n", FILENAME, FNR) > "/dev/stderr";
      exit 1;
    }
    // Found it all on one line
    print
    num_one_line_types++;
    next
  }

  /PyTypeObject.*=.*\{/ {
    if (printing != 0) {
      printf("%s:%d Expected not to be printing\n", FILENAME, FNR) > "/dev/stderr";
      exit 1;
    }
    printing = 1;
    line_begin = FNR;

    maybe_print_file_header()
    num_types++;
  }

  {
    if (printing) {
      print
    }
  }

  /^[:space:]*\}/ {
    if (printing) {
      # Print the edit list for #ifdef #endif.
      line_end = FNR;
      printf("%s %s %d %d\n", rel_path, def_name, line_begin, line_end) > edit_list;
      printing = 0;
    }
  }

  END {
    for (name in found) {
      num_found++;
    }
    printf("extract-types.awk: Found %d definitions in %d files (of %d files)\n",
           num_types, num_found, ARGC) > "/dev/stderr";
    printf("extract-types.awk: Also found %d types on one line\n",
           num_one_line_types) > "/dev/stderr";
  }
  ' @ARGV
}

proc extract-all-types {
  find $TARBALL_ROOT -type f -a -name '*.c' \
    | xargs -- $0 extract-types "$TARBALL_ROOT/"
}

#
# Analysis
#

readonly METRICS_DIR=_tmp/metrics/cpython-defs

# Show current Oil definitions literally.
proc show-oil {
  find build/oil-defs -name '*.def' | xargs cat | less
}

# Show in a contenses format.
proc methods-audit {
  mkdir -p $METRICS_DIR
  cat $BASE_DIR/preprocessed.txt | cpython-defs audit $PY_NAMES \
    | tee _tmp/methods.txt

  wc -l _tmp/methods.txt
}

proc methods-tsv {
  mkdir -p $METRICS_DIR
  local out=$METRICS_DIR/methods.tsv
  cat $BASE_DIR/preprocessed.txt | cpython-defs tsv $PY_NAMES | tee $out
}

proc _report {
  metrics/cpython-defs.R @ARGV
}

proc report {
  _report metrics $METRICS_DIR
}

proc run-for-release {
  # Repeats what we did at the beginning of the release process, because _tmp/
  # was deleted
  oil-py-names
  filter-methods

  methods-tsv
  report | tee $METRICS_DIR/overview.txt
}

proc unfiltered {
  cpython-defs filtered | sort > _tmp/left.txt
  awk '{print $1}' $BASE_DIR/edit-list.txt \
    | egrep -o '[^/]+$' \
    | sort | uniq > _tmp/right.txt
  diff -u _tmp/{left,right}.txt
}


@ARGV
    (DONE build/cpython-defs.sh)
#!/usr/bin/env bash
#
# Build stamp
#
# Usage:
#   build/stamp.sh <function name>

setvar REPO_ROOT = $(cd $(dirname $0)/..; pwd)
source build/common.sh

proc write-release-date {
  mkdir -p _build  # Makefile makes this, but scripts/release.sh needs it too

  # Write a readable, sortable date that is independent of time zone.
  date --utc --rfc-3339 seconds > _build/release-date.txt
}

proc write-git-commit {
  ### Write git commit only if we need to
  # Ninja works on timestamps, so we don't want to cause rebuilds.

  local out=_build/git-commit.txt
  mkdir -p _build

  # This check is not quite accurate, since you can modify a file, and then run
  # Ninja without running build/py.sh all, which calls this function.  But it's
  # better than nothing.
  if ! git diff --quiet {
    log 'Working tree is dirty'

    #rm -f -v $out
    echo '<unknown>' > $out
    return
  }

  local hash
  setvar hash = $(git log -n 1 --pretty='format:%H')

  # Don't disturb the timestamp if it exists!
  if test -f $out {
    local old
    read -r old < $out

    if test $old = $hash {
      log "Unchanged git commit $hash, skipping $out"
      return
    } else {
      log "Overwriting $out with $hash"
    }
  }

  echo $hash > $out
  #log "Wrote $out ($hash)"
}

proc gen-cpp {
  ### For printing out in --version

  local in=$1   # stamp from Ninja
  local h_out=$2
  local cc_out=$3

  local hash
  read -r hash < $in
  #log hash=$hash

  cat >$h_out <<< """
extern const char* gCommitHash;
"""

  cat >$cc_out <<< """
const char* gCommitHash = "$hash";
"""
}

@ARGV
    (DONE build/stamp.sh)
#!/usr/bin/env bash
#
# Actions invoked by build.ninja, which is generated by ./NINJA-config.sh.
#
# It's distributed with the tarball and invoked by _build/oils.sh, so
# it's written in a /bin/sh style.  But we're not using /bin/sh yet.
#
# And some non-Ninja wrappers.
#
# Usage:
#   build/ninja-rules-cpp.sh <function name>
#
# Env variables:
#   BASE_CXXFLAGS=        default flags passed to all compiler invocations
#   CXXFLAGS=             additional flags
#   OILS_CXX_VERBOSE=1    show compiler command lines
#   TIME_TSV_OUT=file     compile_one and link output rows to this TSV file

set -o nounset
set -o errexit
# For /bin/sh portability
#eval 'set -o pipefail'

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)

source build/common.sh  # for $BASE_CXXFLAGS
source build/dev-shell.sh  # python2 in $PATH

# for HAVE_READLINE, READLINE_DIR, and STRIP_FLAGS
if ! source _build/detected-config.sh {
  die "Can't find _build/detected-config.sh.  Run './configure'"
}

proc line_count {
  local out=$1
  shift  # rest are inputs
  wc -l @ARGV | sort -n | tee $out
}

#
# Mutable GLOBALS
#

setvar cxx = ''''         # compiler
setvar flags = ''''       # compile flags
setvar link_flags = ''''  # link flags

#
# Functions to set them
#

proc setglobal_cxx {
  local compiler=$1

  case (compiler) {
    (clang { setvar cxx = "$CLANGXX"  }
    # Note: we could get rid of this "alias", and use 'c++' everywhere
    (cxx {   setvar cxx = ''c++''     }

    # e.g. could be cosmoc++
    (* {     setvar cxx = "$compiler" }
  }
}

proc setglobal_compile_flags {
  ### Set flags based on $variant $more_cxx_flags and $dotd

  local variant=$1
  local more_cxx_flags=$2
  local dotd=${3:-}

  # flags from Ninja/shell respected
  setvar flags = ""$BASE_CXXFLAGS -I $REPO_ROOT $more_cxx_flags""

  # Flags from env
  # Similar to
  # - GNU make - https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html
  #   CXXFLAGS "Extra flags to give to the C++ compiler"
  # - CMake - https://cmake.org/cmake/help/latest/envvar/CXXFLAGS.html 
  #   "Add default compilation flags to be used when compiling CXX (C++) files."

  local env_flags=${CXXFLAGS:-}
  if test -n $env_flags {
    setvar flags = ""$flags $env_flags""
  }

  if test -n $READLINE_DIR {
    setvar flags = ""$flags -I${READLINE_DIR}/include""
  }

  case (variant) {
    *+bumpleak|*+bumproot {
      }
    * {
      setvar flags = ""$flags -D MARK_SWEEP""
      }
  }

  # First half of variant: what affects ALL translation units

  case (variant) {
    dbg* {
      setvar flags = ""$flags -O0 -g""
      }

    asan* {
      # CLEAN_PROCESS_EXIT avoids spurious memory leaks
      setvar flags = ""$flags -O0 -g -fsanitize=address -D CLEAN_PROCESS_EXIT""
      }

    tsan* {
      setvar flags = ""$flags -O0 -g -fsanitize=thread""
      }

    ubsan* {
      # Extra flag to make it fatal
      # https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html

      setvar flags = ""$flags -O0 -g -fsanitize=undefined -fno-sanitize-recover=null""
      }

    opt* {
      setvar flags = ""$flags -O2 -g -D OPTIMIZED""
      }

    coverage* {
      # source-based coverage is more precise than say sanitizer-based
      # https://clang.llvm.org/docs/SourceBasedCodeCoverage.html
      setvar flags = ""$flags -O0 -g -fprofile-instr-generate -fcoverage-mapping""
      }

    uftrace* {
      # -O0 creates a A LOT more data.  But sometimes we want to see the
      # structure of the code.
      # NewStr(), OverAllocatedStr(), StrFromC() etc. are not inlined
      # Ditto vector::size(), std::forward, len(), etc.

      local opt='-O0'
      #local opt='-O2'
      setvar flags = ""$flags $opt -g -pg""
      }

    (* {
      die "Invalid variant $variant"
      }
  }

  # for cxx-dbg32, cxx-opt32+bumpleak, etc.
  case (variant) {
    *32* {
      setvar flags = ""$flags -m32""
      }
  }

  # OPTIONAL second half of variant: for the application

  case (variant) {
    *+gcalways {
      setvar flags = ""$flags -D GC_ALWAYS""
      }

    *+tcmalloc {
      setvar flags = ""$flags -D TCMALLOC""
      }

    *+bumpleak {
      setvar flags = ""$flags -D BUMP_LEAK""
      }
    *+bumproot {
      setvar flags = ""$flags -D BUMP_LEAK -D BUMP_ROOT""
      }

    *+bumpsmall {
      # the pool allocator should approximate opt+bumpsmall (which doesn't support GC)
      setvar flags = ""$flags -D BUMP_ROOT -D BUMP_SMALL -D NO_POOL_ALLOC""
      }

    *+nopool {
      setvar flags = ""$flags -D NO_POOL_ALLOC""
      }
  }

  # needed to strip unused symbols
  # https://stackoverflow.com/questions/6687630/how-to-remove-unused-c-c-symbols-with-gcc-and-ld

  # Note: -ftlo doesn't do anything for size?

  setvar flags = ""$flags -fdata-sections -ffunction-sections""

  # https://ninja-build.org/manual.html#ref_headers
  if test -n $dotd {
    setvar flags = ""$flags -MD -MF $dotd""
  }
}

proc setglobal_link_flags {
  local variant=$1

  case (variant) {
    # Must REPEAT these flags, otherwise we lose sanitizers / coverage
    asan* {
      setvar link_flags = ''-fsanitize=address''
      }

    tcmalloc {
      # Need to tell the dynamic loader where to find tcmalloc
      setvar link_flags = ''-ltcmalloc -Wl,-rpath,/usr/local/lib''
      }

    tsan {
      setvar link_flags = ''-fsanitize=thread''
      }
    ubsan* {
      setvar link_flags = ''-fsanitize=undefined''
      }
    coverage* {
      setvar link_flags = ''-fprofile-instr-generate -fcoverage-mapping''
      }
  }

  case (variant) {
    # TODO: 32-bit variants can't handle -l readline right now.
    *32* {
      setvar link_flags = ""$link_flags -m32""
      }

    * {
      if test $HAVE_READLINE = 1 {
        setvar link_flags = ""$link_flags -lreadline""
      }
      if test -n $READLINE_DIR {
        setvar link_flags = ""$link_flags -L${READLINE_DIR}/lib""
      }
      }
  }

  if test -n ${STRIP_FLAGS:-} {
    setvar link_flags = ""$link_flags -Wl,$STRIP_FLAGS""
  }
}

proc compile_one {
  ### Compile one translation unit.  Invoked by build.ninja

  local compiler=$1
  local variant=$2
  local more_cxx_flags=$3
  local in=$4
  local out=$5
  local dotd=${6:-}  # optional .d file

  setglobal_compile_flags $variant $more_cxx_flags $dotd

  case (out) {
    (_build/preprocessed/* {
      setvar flags = ""$flags -E""
      }

	 # DISABLE spew for mycpp-generated code.  mycpp/pea could flag this at the
   # PYTHON level, rather than doing it at the C++ level.
   (_build/obj/*/_gen/bin/oils_for_unix.mycpp.o {
     setvar flags = ""$flags -Wno-unused-variable -Wno-unused-but-set-variable""
     }
  }

  # TODO: exactly when is -fPIC needed?  Clang needs it sometimes?
  if test $compiler = 'clang' && test $variant != 'opt' {
    setvar flags = ""$flags -fPIC""
  }

  # this flag is only valid in Clang, doesn't work in continuous build
  if test $compiler = 'clang' {
    setvar flags = ""$flags -ferror-limit=10""
  }

  setglobal_cxx $compiler

  if test -n ${OILS_CXX_VERBOSE:-} {
    echo '__' $cxx $flags -o $out -c $in >&2
  }

  # Not using arrays because this is POSIX shell
  local prefix=''
  if test -n ${TIME_TSV_OUT:-} {
    setvar prefix = ""benchmarks/time_.py --tsv --out $TIME_TSV_OUT --append --rusage --field compile_one --field $out --""
  }

  $prefix $cxx $flags -o $out -c $in
}

proc link {
  ### Link a binary.  Invoked by build.ninja

  local compiler=$1
  local variant=$2
  local out=$3
  shift 3
  # rest are inputs

  setglobal_link_flags $variant

  setglobal_cxx $compiler

  local prefix=''
  if test -n ${TIME_TSV_OUT:-} {
    setvar prefix = ""benchmarks/time_.py --tsv --out $TIME_TSV_OUT --append --rusage --field link --field $out --""
  }

  if test -n ${OILS_CXX_VERBOSE:-} {
    echo "__ $prefix $cxx -o $out $[join(ARGV)] $link_flags" >&2
  }
  # IMPORTANT: Flags like -ltcmalloc have to come AFTER objects!  Weird but
  # true.
  $prefix $cxx -o $out @ARGV $link_flags
}

proc compile_and_link {
  ### This function is no longer used; use 'compile_one' and 'link'

  local compiler=$1
  local variant=$2
  local more_cxx_flags=$3
  local out=$4
  shift 4

  setglobal_compile_flags $variant $more_cxx_flags ""  # no dotd

  setglobal_link_flags $variant

  setglobal_cxx $compiler

  if test -n ${OILS_CXX_VERBOSE:-} {
    echo "__ $cxx -o $out $flags $[join(ARGV)] $link_flags" >&2
  }

  $cxx -o $out $flags @ARGV $link_flags
}

proc strip_ {
  ### Invoked by ninja

  local in=$1
  local stripped=$2
  local symbols=${3:-}

  strip -o $stripped $in

  if test -n $symbols {
    objcopy --only-keep-debug $in $symbols
    objcopy --add-gnu-debuglink=$symbols $stripped
  }
}

proc symlink {
  local dir=$1
  local in=$2
  local out=$3

  cd $dir
  ln -s -f -v $in $out
}

# test/cpp-unit.sh sources this
if test $(basename $0) = 'ninja-rules-cpp.sh' {
  @ARGV
}
    (DONE build/ninja-rules-cpp.sh)
#!/usr/bin/env bash
#
# Build the dev version of Oil on CPython.
# This is in contrast to oils-for-unix and the oil.ovm build.
#
# Usage:
#   build/py.sh <function name>

set -o nounset
set -o pipefail
set -o errexit
shopt -s strict:all 2>/dev/null || true  # dogfood for OSH

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)
readonly REPO_ROOT

source build/common.sh       # log, $CLANGXX
source devtools/run-task.sh
# TODO: We could have the user run deps/from-apt.sh directly

if test -z ${IN_NIX_SHELL:-} {
  source build/dev-shell.sh  # to run 're2c'
}

export PYTHONPATH='.:vendor/'

proc ubuntu-deps {
  ### Alias for backward compatibility
  build/deps.sh install-ubuntu-packages
}

# This is what Python uses on OS X.
#
# https://www.thrysoee.dk/editline/
proc install-libedit {
  ### Install the libedit dev headers (the line editor Python uses on OS X)
  sudo apt install libedit-dev
}

proc libedit-flags {
  ### Show compile/link flags for libedit
  pkg-config --libs --cflags libedit
}

proc install-py2 {
  ### Install Python 2 dev dependencies via pip
  set -x

  # pyyaml: for yaml2json
  # typing: because the build/cpython-defs tool
  # flake8: for linting
  # pygments: for doc rendering
  python2 -m pip install pyyaml typing flake8 pygments

  # not sure why this requires sudo and pip2 doesn't
  # this doesn't work on our code
  # sudo pip3 install flake8
}

proc install-py3 {
  ### mypy is needed for type checking
  pip3 install mypy
}

proc destroy-pip {
  ### Remove pip caches and user-level Python 2.7 packages
  rm -r -f -v ~/.cache/pip ~/.local/lib/python2.7
}

# 2021-04: I have no idea why I need this on my Xenial machine
# but the Travis continuous build doesn't need it.
proc install-old-flake8 {
  ### Pin old flake8 + configparser versions (workaround for old Ubuntu)

  # Found by bisection and inspection of MY HOME DIR.  It makes the pip
  # dependency resolver "work"...

  pip install 'configparser==4.0.2'
  pip install 'flake8==3.7.9'

  # Test default version
  unset PYTHONPATH
  ~/.local/bin/flake8 --version
}

# Needed for the release process, but not the dev process.
# TODO: remove in favor of wedges in deps/
proc release-ubuntu-deps {
  ### Packages needed only by the release process, not the dev build
  # For the release to run test/report.R, you need r-base-core too.
  # cloc is used for line counts
  # valgrind/cachegrind for benchmarks
  sudo apt-get install r-base-core cloc valgrind
}

# 3/2021: For installing dplyr on Ubuntu Xenial 16.04 LTS, which has an old R version
# Following these instructions
# https://cloud.r-project.org/bin/linux/ubuntu/README.html

# 5/2021: Upgraded to Ubuntu Bionic, which has R 3.4.4.  So it looks like I no
# longer need this.
#
# 2/2023: I need this again because R 3.4.4 is too old for dplyr.
#
# https://cloud.r-project.org/bin/linux/ubuntu/

proc _install-new-r {
  ### Add the CRAN apt repo and install a newer R (run as root; see install-new-r)

  # update indices
  apt update -qq

  # install two helper packages we need
  apt install --no-install-recommends software-properties-common dirmngr

  # import the signing key (by Michael Rutter) for these repo
  apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9

  # add the R 4.0 repo from CRAN -- adjust 'focal' to 'groovy' or 'bionic' as needed

  local ubuntu_version
  setvar ubuntu_version = $(lsb_release -cs)
  add-apt-repository "deb https://cloud.r-project.org/bin/linux/ubuntu $ubuntu_version-cran40/"

  # Hm I had to run this manually and I got R 4.0
  # 2021-04: Hm this had to be run twice
  apt install --no-install-recommends r-base
}

proc install-new-r {
  ### sudo wrapper for _install-new-r; extra args are forwarded
  sudo $0 _install-new-r @ARGV
}

proc const-mypy-gen {
  ### Generate id_kind_asdl.py and id_kind.py via frontend/consts_gen.py
  local out=_devbuild/gen/id_kind_asdl.py
  frontend/consts_gen.py mypy > $out
  log "  (frontend/consts_gen) -> $out"

  setvar out = '_devbuild/gen/id_kind.py'
  frontend/consts_gen.py py-consts > $out
  log "  (frontend/consts_gen) -> $out"
}

proc option-mypy-gen {
  ### Generate option_asdl.py via frontend/option_gen.py
  local out=_devbuild/gen/option_asdl.py
  frontend/option_gen.py mypy > $out
  log "  (frontend/option_gen) -> $out"
}

proc flag-gen-mypy {
  ### Generate arg_types.py via frontend/flag_gen.py
  local out=_devbuild/gen/arg_types.py
  frontend/flag_gen.py mypy > $out
  log "  (frontend/flag_gen) -> $out"
}

# Helper
proc gen-asdl-py {
  ### Generate a Python module from an ASDL schema

  local asdl_path=$1  # e.g. osh/osh.asdl

  local name
  setvar name = $(basename $asdl_path .asdl)

  local tmp=_tmp/${name}_asdl.py
  local out=_devbuild/gen/${name}_asdl.py

  # abbrev module is optional
  # note: there is no 'shift', so @ARGV still includes $asdl_path plus any
  # extra args (e.g. the abbrev module name)
  asdl/asdl_main.py mypy @ARGV > $tmp

  # BUG: MUST BE DONE ATOMICALLY; otherwise the Python interpreter can
  # import an empty file!
  mv $tmp $out

  log "$asdl_path -> (asdl_main) -> $out"
}

proc py-codegen {
  ### Run all Python code generators (ASDL schemas, consts, options, flags)
  # note: filename must come first
  # hnode.asdl has REQUIRED fields so it's --py-init-N
  gen-asdl-py 'asdl/hnode.asdl' --no-pretty-print-methods --py-init-N 

  gen-asdl-py 'frontend/types.asdl'
  # depends on syntax.asdl
  gen-asdl-py 'core/runtime.asdl'
  gen-asdl-py 'core/value.asdl'
  gen-asdl-py 'tools/find/find.asdl'

  const-mypy-gen  # depends on bool_arg_type_e, generates Id_t

  # does __import__ of syntax_abbrev.py, which depends on Id.  We could use the
  # AST module later?
  # depends on syntax_asdl
  gen-asdl-py 'frontend/syntax.asdl' 'frontend.syntax_abbrev'

  option-mypy-gen
  flag-gen-mypy

  # Experiment
  gen-asdl-py 'yaks/yaks.asdl'

  # For tests
  gen-asdl-py 'mycpp/examples/expr.asdl'
}

proc py-asdl-examples {
  ### Generate the example ASDL modules (used only by tests)
  # dependency of typed_demo
  gen-asdl-py 'asdl/examples/demo_lib.asdl'
  gen-asdl-py 'asdl/examples/typed_demo.asdl'

  gen-asdl-py 'asdl/examples/shared_variant.asdl'
  gen-asdl-py 'asdl/examples/typed_arith.asdl' 'asdl.examples.typed_arith_abbrev'
}

proc oil-cpp {
  ### STUB for backward compatibility

  build/cpp.sh all
}

proc py-ext {
  ### Build a Python extension in place, producing $name.so

  local name=$1          # extension name, used for logging only
  local setup_script=$2  # e.g. pyext/setup_libc.py

  log "  ($setup_script) -> $name.so"

  # (Removed an unused local 'arch' that was computed via uname -m but
  # never read.)

  # global opts come first
  $setup_script --quiet build_ext --inplace

  #file $name.so
}

proc py-ext-test {
  ### Run a test and log it

  # TODO: Fold this into some kind of Ninja test runner?
  # Or just rely on test/unit.sh all?

  local test_path=$1  # Or a function
  shift
  # remaining args (@ARGV) are forwarded to the test

  local log_path=_test/unit/$test_path.log
  mkdir -p $(dirname $log_path)

  # Capture the exit status instead of aborting under errexit
  set +o errexit
  $test_path @ARGV >$log_path 2>&1
  local status=$?
  set -o errexit

  if test $status -eq 0 {
    log "OK $log_path"
  } else {
    # Dump the log on failure, then abort
    echo
    cat $log_path
    echo
    die "FAIL $log_path"
  }
}

proc pylibc {
  ### Build the libc extension and run its unit test
  rm -f libc.so

  py-ext libc pyext/setup_libc.py
  py-ext-test pyext/libc_test.py @ARGV
}

proc fanos {
  ### Build the fanos extension and run its unit test
  rm -f fanos.so

  py-ext fanos pyext/setup_fanos.py
  py-ext-test pyext/fanos_test.py @ARGV
}

proc fastfunc {
  ### Build the fastfunc extension and run its unit test
  rm -f fastfunc.so

  py-ext fastfunc pyext/setup_fastfunc.py
  py-ext-test pyext/fastfunc_test.py @ARGV
}

#
# For frontend/match.py
#

# Thin wrappers around the lexer generator
proc lexer-gen { frontend/lexer_gen.py @ARGV; }

proc print-regex { lexer-gen print-regex; }
proc print-all { lexer-gen print-all; }

# Structure:
#
# _gen
#   frontend/
#     id.asdl_c.h
#     types.asdl_c.h
#     match.re2c.h
# _build/
#   tmp/
#     frontend/
#       match.re2c.in
#     bin/
#       oils_for_unix_raw.mycpp.cc

# re2c native.
proc osh-lex-gen-native {
  ### Run re2c to turn the generated lexer spec into C
  local in=$1
  local out=$2
  # Turn on all warnings and make them native.
  # The COMMENT state can match an empty string at the end of a line, e.g.
  # '#\n'.  So we have to turn that warning off.
  re2c -W -Wno-match-empty-string -Werror -o $out $in
}

proc fastmatch {
  ### Generate C headers: types, id_kind, and the re2c lexer
  local gen_dir=_gen/frontend
  mkdir -p _gen/_tmp $gen_dir

  # C version of frontend/types.asdl
  local out=$gen_dir/types.asdl_c.h
  asdl/asdl_main.py c frontend/types.asdl @ARGV > $out
  log "  (asdl_main c) -> $out"

  # C version of id_kind
  local out=$gen_dir/id_kind.asdl_c.h
  frontend/consts_gen.py c > $out
  log "  (frontend/consts_gen c) -> $out"

  # Fast matcher
  local tmp=_gen/_tmp/match.re2c-input.h
  local out=_gen/frontend/match.re2c.h
  lexer-gen c > $tmp
  log "  (lexer_gen) -> $tmp"

  osh-lex-gen-native $tmp $out
  log "$tmp -> (re2c) -> $out"
}

proc fastlex {
  ### Build the fastlex extension (requires re2c, via fastmatch)
  fastmatch

  # Why do we need this?  It gets stale otherwise.
  rm -f fastlex.so

  py-ext fastlex pyext/setup_fastlex.py
  py-ext-test pyext/fastlex_test.py
}

proc line-input {
  ### Build the line_input extension and run its test
  # Why do we need this?  It gets stale otherwise.
  rm -f line_input.so

  py-ext line_input pyext/setup_line_input.py
  py-ext-test pyext/line_input_test.py
}

proc posix_ {
  ### Build the posix_ extension and run its test
  rm -f posix_.so

  py-ext posix_ pyext/setup_posix.py
  py-ext-test pyext/posix_test.py
}

proc py-source {
  ### Generate Python source code into _devbuild/gen (clean rebuild)

  mkdir -p _tmp _devbuild/gen

  # need -r because Python 3 puts a __pycache__ here
  log 'Removing _devbuild/gen/*'
  rm -r -f _devbuild/gen/*

  # So modules are importable.
  touch _devbuild/__init__.py  _devbuild/gen/__init__.py

  py-codegen  # depends on Id

  # Only for testing.
  py-asdl-examples

  # Needed on Travis.
  ysh-grammar
  find-grammar
  demo-grammar  # for mycpp/examples/pgen2_demo
}

# No fastlex, because we don't want to require re2c installation.
proc py-extensions {
  ### Build all extensions that don't require re2c
  pylibc
  line-input
  posix_
  fanos
  fastfunc
}

proc minimal {
  ### Minimal dev build: Python sources and extensions, without re2c
  build/stamp.sh write-git-commit

  py-source
  py-extensions

  cat <<< """

*****
'$0 minimal' succeeded

  It allows you to run and modify Oil quickly, but the lexer will be slow and
  the help builtin won't work.

'$0 all' requires re2c and libcmark.so.  (Issue #513 is related, ask
on #oil-dev)
*****
"""
}

proc ysh-grammar {
  ### Generate pgen2 tables for the YSH grammar
  mkdir -p _gen/ysh
  touch _gen/__init__.py _gen/ysh/__init__.py

  ysh/grammar_gen.py py ysh/grammar.pgen2 _devbuild/gen
}

proc find-grammar {
  ### Generate pgen2 tables for the find tool
  ysh/grammar_gen.py py tools/find/find.pgen2 _devbuild/gen
}

proc demo-grammar {
  ### Generate pgen2 tables for the arith example (mycpp/examples/pgen2_demo)
  ysh/grammar_gen.py py mycpp/examples/arith.pgen2 _devbuild/gen
}

proc time-helper {
  ### Compile the small C helper used by benchmarks
  local out=${1:-_devbuild/bin/time-helper}
  local in=benchmarks/time-helper.c

  mkdir -p $(dirname $out)

  cc -std=c99 -Wall -o $out $in
  log "  CC $in"
}

proc all {
  ### Full dev build: minimal build plus re2c lexer, time-helper, and docs
  rm -f *.so  # 12/2019: to clear old symlinks, maybe get rid of

  build/stamp.sh write-git-commit

  py-source
  py-extensions  # no re2c

  # requires re2c: deps/from-tar.sh layer-re2c
  fastlex
  time-helper

  # help topics and chapter links are extracted from doc/ref
  build/doc.sh all-ref
}

proc gitpod-minimal {
  ### Set up a Gitpod workspace: deps, minimal build, smoke test
  ubuntu-deps '-y'  # skip prompt
  minimal 
  test/spec.sh smoke

  set -x
  bin/osh -c 'echo hi'
}

# Dispatch only when executed directly; this file is also sourced (build/dev.sh)
if test $(basename $0) = 'py.sh' {
  run-task @ARGV
}
    (DONE build/py.sh)
#!/usr/bin/env bash
#
# Calculate and filter deps of Python apps.
#
# Usage:
#   build/dynamic-deps.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)

source mycpp/common.sh  # $MYPY_REPO

readonly PY_PATH='.:vendor/'

# Temporary
readonly DIR=_build/NINJA

# In git
readonly FILTER_DIR='prebuilt/dynamic-deps'

proc make-egrep {
  ### Strip comments and blank lines from stdin, producing egrep -f patterns
  # match chars until # or space, and line must be non-empty
  gawk '
  match($0, /([^# ]*)/, m) {
    contents = m[0]
    if (contents) {  # skip empty lines
      print(contents)
    }
  }
  '
}

proc write-filters {
  ### Write filename filters in the egrep -f format

  # For ./NINJA-config.sh to use.
  # This style lets us add comments.

  # For asdl.asdl_main and other tools
  make-egrep >$FILTER_DIR/filter-py-tool.txt <<< '''
__init__.py
typing.py  # vendor/typing.py isn't imported normally
'''

  # Don't typecheck these files.

  make-egrep >$FILTER_DIR/filter-typecheck.txt <<< '''
__init__.py
typing.py

# OrderedDict is polymorphic
pylib/collections_.py

# lots of polymorphic stuff etc.
mycpp/mylib.py

# TODO: move or remove these
tools/deps.py
tools/readlink.py
'''

  # On top of the typecheck filter, exclude these from translation.  They are
  # not inputs to mycpp.

  make-egrep >$FILTER_DIR/filter-translate.txt <<< '''
# generated code shouldn't be translated
_devbuild/
_gen/

# definitions that are used by */*_gen.py
.*_def\.py
.*_spec\.py

asdl/py.*           # pybase.py ported by hand to C++

core/py.*           # pyos.py, pyutil.py ported by hand to C++
core/optview\.py    # core/optview_gen.py

data_lang/py.*      # pyj8.py

frontend/py.*\.py   # py_readline.py ported by hand to C++
frontend/consts.py  # frontend/consts_gen.py
frontend/match.py   # frontend/lexer_gen.py

pgen2/grammar.py
pgen2/pnode.py

# should be py_path_stat.py, because it's ported by hand to C++
pylib/path_stat.py

# should be py_bool_stat.py, because it's ported by hand to C++
osh/bool_stat.py

tea/
'''

  # Summarize how big each filter is
  wc -l $FILTER_DIR/filter-*
}

proc repo-filter {
  ### Select files from the dynamic_deps.py output

  # select what's in the repo; eliminating stdlib stuff
  # eliminate _cache for mycpp running under Python-3.10
  # (input lines are pairs; column 2 is the imported file's path)
  fgrep -v "$REPO_ROOT/_cache" | fgrep $REPO_ROOT | awk '{ print $2 }' 
}

proc exclude-filter {
  ### Exclude repo-relative paths

  local filter_name=$1  # e.g. py-tool, typecheck, translate

  egrep -v -f $FILTER_DIR/filter-$filter_name.txt
}

proc mysort {
  ### Byte-wise sort of stdin, independent of the user's locale
  # Fixed garbled translation: the env binding and braces were split across
  # quotes ("proc mysort '{ LC_ALL=C' sort").
  LC_ALL=C sort
}

#
# Programs
#

proc py-tool {
  ### Write the dependency manifest for one Python 2 tool

  local py_module=$1  # e.g. asdl.asdl_main

  local dir=$DIR/$py_module
  # Fixed garbled translation: stray double quotes after $dir and $PY_PATH
  # broke the env binding into a multi-line string.
  mkdir -p $dir

  # Trace the module's imports into (importer, imported) pairs
  PYTHONPATH=$PY_PATH /usr/bin/env python2 \
    build/dynamic_deps.py py-manifest $py_module \
    > $dir/all-pairs.txt

  cat $dir/all-pairs.txt | repo-filter | exclude-filter py-tool | mysort \
    > $dir/deps.txt

  echo "DEPS $dir/deps.txt"
}

# Code generators
proc list-gen {
  ### List all code generator scripts in the repo
  ls */*_gen.py
}

# mycpp and pea deps are committed to git instead of in _build/NINJA/ because
# users might not have Python 3.10

proc write-pea {
  ### Write deps for pea.pea_main (committed to git; needs Python 3)

  # PYTHONPATH=$PY_PATH 
  local module='pea.pea_main'
  local dir=prebuilt/ninja/$module
  mkdir -p $dir

  # Fixed garbled translation: stray quotes after dev-shell.sh and
  # "PYTHONPATH=." broke the source line and the env binding.
  source build/dev-shell.sh  # python3

  # Can't use vendor/typing.py
  PYTHONPATH=. python3 \
    build/dynamic_deps.py py-manifest $module \
  > $dir/all-pairs.txt

  cat $dir/all-pairs.txt | repo-filter | mysort | tee $dir/deps.txt

  echo
  echo $dir/*
}

proc write-mycpp {
  ### Write deps for mycpp.mycpp_main (committed to git; needs Python 3)

  local module='mycpp.mycpp_main'
  local dir=prebuilt/ninja/$module
  mkdir -p $dir

  # Fixed garbled translation: stray quotes after 'activate' and $MYPY_REPO
  # broke the env binding.  Subshell so 'activate' doesn't leak into this shell.
  shell { source $MYCPP_VENV/bin/activate
    PYTHONPATH=$REPO_ROOT:$REPO_ROOT/mycpp:$MYPY_REPO maybe-our-python3 \
      build/dynamic_deps.py py-manifest $module > $dir/all-pairs.txt
  }

  cat $dir/all-pairs.txt \
    | grep -v oilshell/oil_DEPS \
    | repo-filter \
    | exclude-filter py-tool \
    | mysort \
    | tee $dir/deps.txt

  echo
  echo $dir/*
}

proc mycpp-example-parse {
  ### Manifests for mycpp/examples/parse are committed to git

  local dir=$DIR/parse
  # Fixed garbled translation: stray double quotes after $dir and $PY_PATH
  # broke the env binding.
  mkdir -p $dir

  PYTHONPATH=$PY_PATH /usr/bin/env python2 \
    build/dynamic_deps.py py-manifest mycpp.examples.parse \
  > $dir/all-pairs.txt

  local ty=mycpp/examples/parse.typecheck.txt
  local tr=mycpp/examples/parse.translate.txt

  cat $dir/all-pairs.txt | repo-filter | exclude-filter typecheck | mysort > $ty

  cat $ty | exclude-filter translate > $tr

  wc -l $ty $tr

  #head $ty $tr
}

proc pea-hack {
  ### Copy the oils_for_unix typecheck list for pea, minus help_meta
  # Leave out help_.py for Soil
  grep -v '_devbuild/gen/help_meta.py' $DIR/bin.oils_for_unix/typecheck.txt \
    > pea/oils-typecheck.txt
}

# Sourced by NINJA-config.sh
if test $(basename $0) = 'dynamic-deps.sh' {
  @ARGV
}
    (DONE build/dynamic-deps.sh)
#!/usr/bin/env bash
#
# BACKWARD COMPATIBILITY 2022-08

source build/py.sh
    (DONE build/dev.sh)
# Sets $PATH to the locations of some precompiled binaries.
# An alternative to nix-shell.
#
# Usage:
#   source build/dev-shell.sh
#
# Note: assumes that $REPO_ROOT is $PWD.
#
# IMPORTANT: sourced by _build/oils.sh, so it must remain POSIX SHELL

setvar ROOT_WEDGE_DIR = '/wedge/oils-for-unix.org'
# Also in build/deps.sh
# Fix: tilde is NOT expanded inside double quotes, so spell out $HOME
setvar USER_WEDGE_DIR = "$HOME/wedge/oils-for-unix.org"

# put 'python2' in $PATH
readonly WEDGE_PY2_DIR=$ROOT_WEDGE_DIR/pkg/python2/2.7.18/bin
if test -d $WEDGE_PY2_DIR {
  export PATH="$WEDGE_PY2_DIR:$PATH"
}

# put 'python3' in $PATH
readonly WEDGE_PY3_DIR=$ROOT_WEDGE_DIR/pkg/python3/3.10.4/bin
# Unconditionally add it to PATH; otherwise build/deps.sh install-wedges won't
# work
export PATH="$WEDGE_PY3_DIR:$PATH"

readonly WEDGE_BLOATY_DIR=$ROOT_WEDGE_DIR/pkg/bloaty/1.1  # not in bin
if test -d $WEDGE_BLOATY_DIR {
  export PATH="$WEDGE_BLOATY_DIR:$PATH"
}

readonly WEDGE_RE2C_DIR=$ROOT_WEDGE_DIR/pkg/re2c/3.0/bin
if test -d $WEDGE_RE2C_DIR {
  export PATH="$WEDGE_RE2C_DIR:$PATH"
}

# uftrace must be installed by wedge?
readonly UFTRACE_WEDGE_DIR=$ROOT_WEDGE_DIR/pkg/uftrace/0.13/bin
if test -d $UFTRACE_WEDGE_DIR {
  export PATH="$UFTRACE_WEDGE_DIR:$PATH"
}

# FALLBACK without test/spec-bin: test/spec.sh link-busybox-ash
readonly ASH_SYMLINK_DIR="$PWD/_tmp/shells"
if test -d $ASH_SYMLINK_DIR {
  export PATH="$ASH_SYMLINK_DIR:$PATH"
}

# test/spec-bin.sh builds binaries
# This takes precedence over $ASH_SYMLINK_DIR
readonly SPEC_DIR="$PWD/../oil_DEPS/spec-bin"

if test -d $SPEC_DIR {
  export PATH="$SPEC_DIR:$PATH"
}

if test -d ~/R {
  # 2023-07: Hack to keep using old versions on lenny.local
  # In 2023-04, dplyr stopped supporting R 3.4.4 on Ubuntu Bionic
  # https://cran.r-project.org/web/packages/dplyr/index.html
  export R_LIBS_USER=~/R
} else {
  # Fix: tilde is NOT expanded inside double quotes, so spell out $HOME
  setvar R_LIBS_WEDGE = "$HOME/wedge/oils-for-unix.org/pkg/R-libs/2023-04-18"
  export R_LIBS_USER=$R_LIBS_WEDGE
}

# So we can run Python 2 scripts directly, e.g. asdl/asdl_main.py
export PYTHONPATH='.'

# We can also run mycpp/mycpp_main.py directly
#
# But NOT bin/oils_for_unix.py (Python 2).  Those need to find our stripped down
# vendor/typing.py, but we CANNOT put vendor/ in $PYTHONPATH, because then
# mycpp would import it and fail.

readonly site_packages=lib/python3.10/site-packages

#readonly PY3_LIBS_VERSION=2023-07-27
# Use older version because containers aren't rebuild.  TODO: fix this
readonly PY3_LIBS_VERSION=2023-03-04

# Note: Version should match the one in build/deps.sh
readonly PY3_LIBS_WEDGE=$USER_WEDGE_DIR/pkg/py3-libs/$PY3_LIBS_VERSION/$site_packages
# Unconditionally add to PYTHONPATH; otherwise build/deps.sh install-wedges
# can't work in one shot
export PYTHONPATH="$PY3_LIBS_WEDGE:$PYTHONPATH"

setvar MYPY_VERSION = '0.780'
# TODO: would be nice to upgrade to newer version
#readonly MYPY_VERSION=0.971

# Containers copy it here
readonly MYPY_WEDGE=$USER_WEDGE_DIR/pkg/mypy/$MYPY_VERSION
if test -d $MYPY_WEDGE {
  export PYTHONPATH="$MYPY_WEDGE:$PYTHONPATH"
}

# Hack for misconfigured RC cluster!  Some machines have the empty string in
# their $PATH (due to some having CUDA and others not).
#
# TODO: I should fix the machines, and make this a FATAL error.  The $PATH
# leaks on purpose because we might want to run with nix-shell -- see
# test/spec-common.sh.
case (PATH) {
  *::* {
    setvar PATH = $(echo "$PATH" | sed 's/::/:/g')
    }
}
    (DONE build/dev-shell.sh)
#!/usr/bin/env bash
#
# Build oils-for-unix.
#
# Usage:
#   build/native.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)  # tsv-lib.sh uses this
source build/common.sh  # log

# Demo for the oils-for-unix tarball.
# Notes:
# - Does not rely on Ninja, which is for the dev build
# - It shouldn't require 'objcopy'
# - TODO: do this in the Soil 'cpp' task

proc tarball-demo {
  ### Configure and build the tarball the way a user would, then run it once
  mkdir -p _bin

  ./configure

  time _build/oils.sh '' '' SKIP_REBUILD

  local bin=_bin/cxx-opt-sh/oils-for-unix.stripped

  ls -l $bin

  echo
  echo "You can now run $bin.  Example:"
  echo

  set -o xtrace

  # TODO: Use symlink
  $bin osh -n -c 'echo "hello $name"'
}

proc measure-build-times {
  ### Time the tarball build, writing per-step rows to a TSV file

  local variant=${1:-opt}

  mkdir -p _bin

  ./configure

  local out_tsv=_tmp/time-tarball-$variant.tsv

  # Fixed garbled translation: stray double quotes after '--field out' and
  # $out_tsv broke the command and the env binding.
  # Header for functions in build/ninja-rules-cpp.sh
  benchmarks/time_.py --tsv --out $out_tsv --rusage --print-header --field verb --field out

  time TIME_TSV_OUT=$out_tsv _build/oils.sh '' $variant

  echo
  cat $out_tsv
}

#
# Ninja Wrappers
#

proc oil-slice-demo {
  ### Smoke-test a shell binary on snippets of increasing complexity
  export PYTHONPATH='.:vendor/'

  echo 'echo hi' | bin/osh_parse.py
  bin/osh_parse.py -c 'ls -l'

  local osh=${1:-bin/osh}

  # Same functionality in bin/oils-for-unix
  echo 'echo hi' | $osh
  $osh -n -c 'ls -l'
  echo ---
  # ast format is none
  $osh --ast-format none -n -c 'ls -l'

  echo '-----'

  # Now test some more exotic stuff
  $osh -c '(( a = 1 + 2 * 3 )); echo $a'

  $osh -c \
    'echo "hello"x $$ ${$} $((1 + 2 * 3)) {foo,bar}@example.com'

  $osh -c 'for x in 1 2 3; do echo $x; done'
}

proc soil-run {
  ### CI entry point: build osh with ninja and smoke-test it
  if test ${container:-} = podman {

    # Work around for ASAN not working in podman

    local bin=_bin/cxx-dbg/osh

    log "Using $bin for podman"
    log ''

  } else {
    local bin=_bin/cxx-asan/osh
  }

  ninja $bin
  echo

  echo "Built $bin"
  echo

  $bin --version
  echo

  oil-slice-demo $bin
}

@ARGV  # dispatch: run the function named on the command line
    (DONE build/native.sh)
#!/usr/bin/env bash
#
# Compile OVM tarball.
#
# Usage:
#   build/ovm-compile.sh <function name>

set -o nounset
set -o pipefail
set -o errexit
shopt -s strict:all 2>/dev/null || true  # dogfood for OSH

setvar REPO_ROOT = $(cd $(dirname $0)/..; pwd)
readonly REPO_ROOT

source build/common.sh

proc source-detected-config-or-die {
  ### Load ./configure output, or abort with a clear message

  if ! source _build/detected-config.sh {
    # Make this error stand out.
    # Fixed: the message said detected-config.h; the file sourced is .sh
    echo
    echo "FATAL: can't find _build/detected-config.sh.  Run './configure'"
    echo
    exit 1
  }
}

# NOTES on trying to delete certain modules:
#
# _warnings.c: There weren't that many; it probably could be deleted.
# bufferobject.c: the types.py module uses it.
# Python-ast.h: pythonrun.c uses it in several places (mod_ty), and a lot of
# stuff uses pythonrun.c.
# pythonrun.c: lots interpreter flags and interpreter initialization caused
# link errors.
# pyctype.c: Tables needed for many string operations.

# getargs.c: needed for Python-C API, e.g. PyArg_ParseTuple.
# dtoa.c: not tried, but I assume that %.3f for 'time' uses it.


readonly OVM_PYTHON_OBJS='
Python/_warnings.c
Python/bltinmodule.c
Python/ceval.c
Python/errors.c
Python/getargs.c
Python/getcompiler.c
Python/getplatform.c
Python/getversion.c
Python/import.c
Python/marshal.c
Python/modsupport.c
Python/mystrtoul.c
Python/mysnprintf.c
Python/pyarena.c
Python/pyctype.c
Python/pyfpe.c
Python/pystate.c
Python/pythonrun.c
Python/random.c
Python/structmember.c
Python/sysmodule.c
Python/traceback.c
Python/pystrtod.c
Python/dtoa.c
Python/pymath.c
'
# NOTE: pystrtod.c needs some floating point functions in pymath.c

setvar OBJECT_OBJS = ''
Objects/abstract.c
Objects/boolobject.c
Objects/bufferobject.c
Objects/bytes_methods.c
Objects/capsule.c
Objects/cellobject.c
Objects/classobject.c
Objects/cobject.c
Objects/codeobject.c
Objects/descrobject.c
Objects/enumobject.c
Objects/exceptions.c
Objects/genobject.c
Objects/fileobject.c
Objects/floatobject.c
Objects/frameobject.c
Objects/funcobject.c
Objects/intobject.c
Objects/iterobject.c
Objects/listobject.c
Objects/longobject.c
Objects/dictobject.c
Objects/methodobject.c
Objects/moduleobject.c
Objects/object.c
Objects/obmalloc.c
Objects/rangeobject.c
Objects/setobject.c
Objects/sliceobject.c
Objects/stringobject.c
Objects/structseq.c
Objects/tupleobject.c
Objects/typeobject.c
Objects/weakrefobject.c
''

# Non-standard lib stuff.
setvar MODULE_OBJS = ''
Modules/main.c
Modules/gcmodule.c
''

# The stuff in Modules/Setup.dist, signalmodule.c.  NOTE: In Python,
# signalmodule.c is specified in Modules/Setup.config, which comes from
# 'configure' output.
setvar MODOBJS = ''
Modules/errnomodule.c
Modules/pwdmodule.c
Modules/_weakref.c
Modules/zipimport.c
Modules/signalmodule.c
''

# Parser/myreadline.c is needed for raw_input() to work.  There is a dependency
# from Python/bltinmodule.c to it.
setvar OVM_LIBRARY_OBJS = ""
Modules/getbuildinfo.c
Parser/myreadline.c
$OBJECT_OBJS
$OVM_PYTHON_OBJS
$MODULE_OBJS
$MODOBJS
""

readonly EMPTY_STR='""'

# Stub out a few variables
readonly PREPROC_FLAGS=(
  -D OVM_MAIN \
  -D PYTHONPATH="$EMPTY_STR" \
  -D VERSION="$EMPTY_STR" \
  -D VPATH="$EMPTY_STR" \
  -D Py_BUILD_CORE \
  # Python already has support for disabling complex numbers!
  -D WITHOUT_COMPLEX
)

# NOTE: build/oil-defs is hard-coded to the oil.ovm app.  We're abandoning
# hello.ovm and opy.ovm for now, but those can easily be added later.  We
# haven't mangled the CPython source!
readonly INCLUDE_PATHS=(
  -I .   # for pyconfig.h
  -I ..  # for _gen/frontend/id_kind_asdl_c.h etc.
  -I Include
  -I ../build/oil-defs
)
readonly CC=${CC:-cc}  # cc should be on POSIX systems

# BASE_CFLAGS is copied by observation from what configure.ac does on my Ubuntu
# 16.04 system.  Then we check if it works on Alpine Linux too.

# "Python violates C99 rules, by casting between incompatible pointer types.
# GCC may generate bad code as a result of that, so use -fno-strict-aliasing if
# supported."
# - gcc 4.x and Clang need -fwrapv

# TODO:
# - -DNDEBUG is also passed.  That turns off asserts.  Do we want that?
# - We should auto-detect the flags in configure, or simplify the source so it
# isn't necessary.  Python's configure.ac sometimes does it by compiling a test
# file; at other times it does it by grepping $CC --help.

# pyext/fanos.c needs -std=c99
setvar BASE_CFLAGS = ''-fno-strict-aliasing -fwrapv -Wall -Wstrict-prototypes -std=c99''

# These flags are disabled for OS X.  I would have thought it would work in
# Clang?  It works with both GCC and Clang on Linux.
# https://stackoverflow.com/questions/6687630/how-to-remove-unused-c-c-symbols-with-gcc-and-ld
#BASE_CFLAGS="$BASE_CFLAGS -fdata-sections -ffunction-sections"

# Needed after cpython-defs filtering.
setvar BASE_CFLAGS = ""$BASE_CFLAGS -Wno-unused-variable -Wno-unused-function""
readonly BASE_CFLAGS

setvar BASE_LDFLAGS = ''''
# Disabled for OS X
# BASE_LDFLAGS='-Wl,--gc-sections'

# The user should be able to customize CFLAGS, but it shouldn't disable what's
# in BASE_CFLAGS.
readonly CFLAGS=${CFLAGS:-}
readonly LDFLAGS=${LDFLAGS:-}

proc build {
  ### Compile the OVM binary (CPython subset + app code) in one cc invocation

  local out=${1:-$PY27/ovm2}
  local module_init=${2:-$PY27/Modules/config.c}
  local main_name=${3:-_tmp/hello/main_name.c}
  local c_module_srcs=${4:-_tmp/hello/c-module-srcs.txt}
  shift 4
  # remaining args (@ARGV) are extra compiler flags; see build-dbg / build-opt

  # Absolute paths because we pushd into $PY27 below
  local abs_out=$PWD/$out
  local abs_module_init=$PWD/$module_init
  local abs_main_name=$PWD/$main_name
  local abs_c_module_srcs=$PWD/$c_module_srcs

  #echo $OVM_LIBRARY_OBJS

  # HAVE_READLINE defined in detected-config.sh.
  source-detected-config-or-die

  pushd $PY27

  local readline_flags=''
  if [[ "$HAVE_READLINE" -eq 1 ]] {
    # Readline interface for tokenizer.c and [raw_]input() in bltinmodule.c.
    # For now, we are using raw_input() for the REPL.  TODO: Parameterize this!
    # We should create a special no_readline_raw_input().

    setvar c_module_src_list = $(cat $abs_c_module_srcs)

    if [[ -n "$READLINE_DIR" ]] {
      setvar readline_flags = ""-L $READLINE_DIR/lib -I $READLINE_DIR/include ""
    }

    # NOTE: pyconfig.h has HAVE_LIBREADLINE but doesn't appear to use it?
    # NOTE(review): this assignment OVERWRITES the -L/-I flags set just above,
    # so $READLINE_DIR is effectively ignored -- looks like it should append;
    # confirm against the original bash before changing
    setvar readline_flags = ""-l readline -D HAVE_READLINE""
  } else {
    # don't fail
    setvar c_module_src_list = $(grep -E -v '/readline.c|/line_input.c' $abs_c_module_srcs || true)
  }

  # $PREFIX comes from ./configure and defaults to /usr/local.
  # $EXEC_PREFIX is a GNU thing and used in getpath.c.  Could probably get rid
  # of it.

  time $CC \
    ${BASE_CFLAGS} \
    ${CFLAGS} \
    ${INCLUDE_PATHS[@]} \
    ${PREPROC_FLAGS[@]} \
    -D PREFIX="\"$PREFIX\"" \
    -D EXEC_PREFIX="\"$PREFIX\"" \
    -o $abs_out \
    $OVM_LIBRARY_OBJS \
    $abs_module_init \
    $abs_main_name \
    $c_module_src_list \
    Modules/ovm.c \
    -l m \
    ${BASE_LDFLAGS} \
    ${LDFLAGS} \
    $readline_flags \
    @ARGV

  # NOTE:
  # -l readline -l termcap -- for Python readline.  Hm it builds without -l
  # termcap.
  # -l z WOULD be needed for zlibmodule.c, but we don't need it because our zip
  # file has no compression -- see build/make_zip.py with ZIP_STORED.
  # zipimport works fine without this.
}

# build the optimized one.  Makefile uses -O3.

# Clang -O2 is 1.37 MB.  18 seconds to compile.
#   -m32 is 1.12 MB.  But I probably have to redefine a few things because
#   there are more warnings.
# -O3 is 1.40 MB.

# GCC -O2 is 1.35 MB.  21 seconds to compile.

proc build-dbg {
  ### Debug build: no optimization, debug symbols, OVM_DEBUG defined
  build @ARGV -O0 -g -D OVM_DEBUG
}

# This will be stripped later.
proc build-opt {
  ### Optimized build; keeps frame pointers for profiling
  # frame pointer for perf.  Otherwise stack traces are messed up!
  # http://www.brendangregg.com/FlameGraphs/cpuflamegraphs.html#C  But why
  # isn't debuginfo enough?  Because it's a recursive function?
  # Does this make things slower?  Do I need a "perf" build?
  build @ARGV -O3 -fno-omit-frame-pointer
}

#
# Source Release (uses same files
#

proc add-py27 {
  ### Prefix each filename on stdin with $PY27/
  xargs -I {} -- echo $PY27/{}
}

proc python-sources {
  ### List the CPython .c files that go into the tarball
  echo $OVM_LIBRARY_OBJS | add-py27
}

proc _headers {
  ### Print gcc -MM output for all compiled sources (used to find headers)
  local c_module_srcs=${1:-_tmp/hello/c-module-srcs.txt}
  local abs_c_module_srcs=$PWD/$c_module_srcs

  cd $PY27

  # -MM: no system headers
  gcc \
    ${INCLUDE_PATHS[@]} \
    ${PREPROC_FLAGS[@]} \
    -MM $OVM_LIBRARY_OBJS \
    Modules/ovm.c \
    $(cat $abs_c_module_srcs)
}

# NOTE: 91 headers in Include, but only 81 referenced here.  So it's worth it.
# These are probably for the parser.
#
# NOTE: We also should get rid of asdl.h and so forth.

proc python-headers {
  ### List the .h/.def files the build references, as tarball-safe paths
  local c_module_srcs=$1

  # 1. -MM outputs Makefile fragments, so egrep turns those into proper lines.
  #
  # 2. The user should generated detected-config.h, so remove it.
  #
  # 3. # gcc outputs paths like
  # Python-2.7.13/Python/../Objects/stringlib/stringdefs.h
  # but 'Python/..' causes problems for tar.
  #

  # NOTE: need .def for build/oil-defs.
  _headers $c_module_srcs \
    | egrep --only-matching '[^ ]+\.(h|def)' \
    | grep -v '_build/detected-config.h' \
    | sed 's|^Python/../||' \
    | sort | uniq | add-py27
}

proc make-tar {
  ### Create the source-release tarball for an app ('hello' or 'oil')
  local app_name=${1:-hello}
  local bytecode_zip=${2:-bytecode-cpython.zip}
  local out=${3:-_release/hello.tar}

  local version_file
  case (app_name) {
    oil {
      setvar version_file = 'oil-version.txt'
      }
    hello {
      setvar version_file = 'build/testdata/hello-version.txt'
      }
    * {
      die "Unknown app $app_name"
      exit 1
      }
  }
  local version=$(head -n 1 $version_file)

  echo "Creating $app_name version $version"

  local c_module_srcs=_build/$app_name/c-module-srcs.txt

  # Add oil-0.0.0/ to the beginning of every path.
  local sed_expr="s,^,${app_name}-${version}/,"

  # Differences between tarball and repo:
  #
  # - build/portable-rules.mk is intentionally not included in the release tarball.
  #   The Makefile can and should operate without it.
  #
  # - We include intermediate files like c-module-srcs.txt, so we don't have to
  #   ship tools dynamic_deps.py.  The end-user build shouldn't depend on Python.

  # Note: python-headers runs gcc -M, including pyconfig.h and
  # _build/detected-config.h.

  tar --create --transform $sed_expr --file $out \
    LICENSE.txt \
    INSTALL.txt \
    configure \
    install \
    uninstall \
    Makefile \
    doc/osh.1 \
    build/ovm-compile.sh \
    build/ovm-actions.sh \
    build/clean.sh \
    build/common.sh \
    build/detect-*.c \
    _build/$app_name/$bytecode_zip \
    _build/$app_name/*.c \
    $PY27/LICENSE \
    $PY27/Modules/ovm.c \
    $c_module_srcs \
    $(cat $c_module_srcs | add-py27) \
    $(python-headers $c_module_srcs) \
    $(python-sources)

  ls -l $out
}

# 123K lines.
# Excluding MODOBJS, it's 104K lines.
#
# Biggest: posixmodule,unicodeobject,typeobject,ceval.
#
# Remove tmpnam from posixmodule, other cruft.
#
# Big ones to rid of: unicodeobject.c, import.c
# codecs and codecsmodule?  There is some non-unicode stuff there though.
#
# Probably need unicode for compatibility with modules and web frameworks
# especially.

proc count-c-lines {
  ### Count lines and files in the CPython sources we compile
  pushd $PY27
  wc -l $OVM_LIBRARY_OBJS | sort -n

  # 90 files.
  # NOTE: To count headers, use the tar file.
  echo
  echo 'Files:'
  do { for i in $OVM_LIBRARY_OBJS {
     echo $i
    }
  } | wc -l

  popd
}

@ARGV  # dispatch: run the function named on the command line
    (DONE build/ovm-compile.sh)
#!/usr/bin/env bash
#
# Ninja rules for translating Python to C++.
#
# Usage:
#   build/ninja-rules-py.sh <function name>
#
# Env variables:
#   EXTRA_MYCPP_ARGS - passed to mycpp_main

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)

source build/dev-shell.sh  # python2 in $PATH
source mycpp/common-vars.sh  # MYPY_REPO
source $REPO_ROOT/test/tsv-lib.sh  # time-tsv

proc example-main {
  ### Print the C++ main() used for mycpp examples
  local main_module=${1:-fib_iter}

  cat <<< """
int main(int argc, char **argv) {
  gHeap.Init();

  char* b = getenv("BENCHMARK");
  if (b && strlen(b)) {  // match Python's logic
    fprintf(stderr, "Benchmarking...'\'n");
    $main_module::run_benchmarks();
  } else {
    $main_module::run_tests();
  }

  gHeap.CleanProcessExit();
}
"""
}

proc oils-for-unix-main {
  ### Print the C++ main() for the oils-for-unix binary
  local main_namespace=$1

  cat <<< """
int main(int argc, char **argv) {
  mylib::InitCppOnly();  // Initializes gHeap

  auto* args = Alloc<List<BigStr*>>();
  for (int i = 0; i < argc; ++i) {
    args->append(StrFromC(argv[i]));
  }

  int status = $main_namespace::main(args);

  gHeap.ProcessExit();

  return status;
}
"""
}

proc gen-oils-for-unix {
  ### Translate Python sources with mycpp, then assemble the .h and .cc outputs.
  local main_name=$1
  local out_prefix=$2
  shift 2  # rest are inputs

  # Put it in _build/tmp so it's not in the tarball
  local tmp=_build/tmp
  mkdir -p $tmp

  local raw_cc=$tmp/oils_for_unix_raw.cc
  local cc_out=${out_prefix}.cc

  local raw_header=$tmp/oils_for_unix_raw.h
  local header_out=${out_prefix}.h

  local mypypath="$REPO_ROOT:$REPO_ROOT/pyext"

  # Run the mycpp translator; remaining args are the Python inputs.
  _bin/shwrap/mycpp_main $mypypath $raw_cc \
    --header-out $raw_header \
    ${EXTRA_MYCPP_ARGS:-} \
    @ARGV

  # Wrap the raw header in an include guard.
  do { echo "// $main_name.h: translated from Python by mycpp"
    echo
    echo '#ifndef OILS_FOR_UNIX_MYCPP_H'
    echo '#define OILS_FOR_UNIX_MYCPP_H'

    cat $raw_header

    echo '#endif  // OILS_FOR_UNIX_MYCPP_H'

  } > $header_out

  # Prepend the preamble and append the main() wrapper to the raw .cc file.
  do { cat <<< """
// $main_name.cc: translated from Python by mycpp

// #include "$header_out"

#include "cpp/preamble.h"
"""

    cat $raw_cc

    oils-for-unix-main $main_name
  } > $cc_out
}

proc print-wrap-cc {
  ### Print a complete .cc file: preamble include, translated body, and main().
  local translator=$1
  local main_module=$2
  local in=$3
  local preamble_path=$4

   echo "// examples/$main_module translated by $translator"
   echo

   # Optional per-example preamble header.
   if test -f $preamble_path {
     echo "#include \"$preamble_path\""
   }

   cat $in

   # main() function
   case (translator) {
     (mycpp {
       example-main $main_module
       }
     (pea {
        echo '#include <stdio.h>'
        echo 'int main() { printf("stub\n"); return 1; }'
       }
     (* {
       die "Invalid translator $translator"
       }
   }
}

proc wrap-cc {
  ### Like print-wrap-cc, but writes the result to $1 instead of stdout.
  local out=$1
  shift

  # $translator $main_module $in $preamble_path
  print-wrap-cc @ARGV > $out
}

# TODO: Move mycpp/example tasks out of Ninja since timing is not a VALUE.  It
# depends on the machine, can be done more than once, etc.

proc task {
  ### Run a binary under time-tsv, capturing timing in $2 and output in $3.
  local bin=$1  # Run this
  local task_out=$2
  local log_out=$3

  shift 3
  # The rest of the args are passed as flags to time-tsv

  # Python examples need the repo's module paths.
  case (bin) {
    (mycpp/examples/*.py {
      # we import mycpp.mylib and pylib.collections_
      export PYTHONPATH="$REPO_ROOT/mycpp:$REPO_ROOT/vendor:$REPO_ROOT"
      }
  }

  # Benchmark tasks signal benchmark mode via env var.
  case (task_out) {
    (_test/tasks/benchmark/* {
      export BENCHMARK=1
      }
  }

  time-tsv -o $task_out --rusage @ARGV --field $bin --field $task_out -- \
    $bin >$log_out 2>&1
}

proc example-task {
  ### Run a program in the examples/ dir, either in Python or C++

  # Positional args: example name, implementation label, binary to run,
  # timing-TSV output path, and log output path.
  local example_name=$1   # e.g. 'fib_iter'
  local example_impl=$2   # 'Python' or 'C++'
  local example_bin=$3
  local tsv_out=$4
  local log_path=$5

  # Delegate to the generic runner, tagging rows with name and impl.
  task $example_bin $tsv_out $log_path --field $example_name --field $example_impl
}

proc benchmark-table {
  ### Concatenate per-task TSV files under a single header row, into $1.
  local out=$1
  shift

  # TODO: Use QTT header with types?
  do { time-tsv --print-header --rusage \
      --field example_name --field impl \
      --field bin --field task_out 

    # Concatenate task files
    cat @ARGV 
  } > $out
}

# TODO: No longer works.  This is called by ninja mycpp-check
# I think it's giving strict warnings.
proc mypy {
  ### Run mypy from its virtualenv with the MyPy repo on PYTHONPATH.
  # Subshell: 'source activate' must not leak into the calling shell.
  # Fix: removed stray quote characters (translation artifacts) that broke
  # the 'source' line and the PYTHONPATH binding.
  shell { source $MYCPP_VENV/bin/activate
    # Don't need this since the virtualenv we created with it?
    # source build/dev-shell.sh
    PYTHONPATH=$MYPY_REPO python3 -m mypy @ARGV;
  }
}

proc typecheck {
  ### Typecheck without translation
  # $1: main Python file; $2: output path for mypy's report;
  # $3: if non-empty, don't follow imports.
  local main_py=$1
  local out=$2
  local skip_imports=${3:-}

  if test -n $skip_imports {
    local more_flags='--follow-imports=silent'
  } else {
    local more_flags=''
  }

  # Fix: removed a stray trailing quote after the closing brace above, and a
  # doubled quote in the MYPYPATH binding below (translation artifacts).

  # $more_flags can be empty
  MYPYPATH="$REPO_ROOT:$REPO_ROOT/mycpp" \
    mypy --py2 --strict $more_flags $main_py > $out
}

proc logs-equal {
  ### Compare pairs of log files; tee the comparison report into $1.
  local out=$1
  shift

  mycpp/compare_pairs.py @ARGV | tee $out
}

#
# shwrap rules
#

proc shwrap-py {
  ### Part of shell template for Python executables

  # $1: repo-relative path of the Python main script.
  local main=$1
  echo 'PYTHONPATH=$REPO_ROOT:$REPO_ROOT/vendor exec $REPO_ROOT/'$main' "$@"'
}

proc shwrap-mycpp {
  ### Part of shell template for mycpp executable

  # Literal (single-quoted) shell body: variables expand when the wrapper runs.
  cat <<< '''
MYPYPATH=$1    # e.g. $REPO_ROOT/mycpp
out=$2
shift 2

# Modifies $PATH; do not combine
. build/dev-shell.sh

tmp=$out.tmp  # avoid creating partial files

MYPYPATH="$MYPYPATH" \
  python3 mycpp/mycpp_main.py --cc-out $tmp "$@"
status=$?

mv $tmp $out
exit $status
'''
}

proc shwrap-pea {
  ### Part of shell template for pea executable

  # Literal (single-quoted) shell body: variables expand when the wrapper runs.
  cat <<< '''
MYPYPATH=$1    # e.g. $REPO_ROOT/mycpp
out=$2
shift 2

tmp=$out.tmp  # avoid creating partial files

PYTHONPATH="$REPO_ROOT:$MYPY_REPO" MYPYPATH="$MYPYPATH" \
  python3 pea/pea_main.py cpp "$@" > $tmp
status=$?

mv $tmp $out
exit $status
'''
}

proc print-shwrap {
  ### Print a complete shell wrapper script for the chosen template.
  local template=$1
  local unused=$2
  shift 2

  # Common prologue: locate the repo root and pick up python2.
  cat <<< '''
#!/bin/sh
REPO_ROOT=$(cd "$(dirname $0)/../.."; pwd)
. $REPO_ROOT/build/py2.sh
'''

  case (template) {
    (py {
      local main=$1  # additional arg
      shift
      shwrap-py $main
      }
    (mycpp {
      shwrap-mycpp
      }
    (pea {
      shwrap-pea
      }
    (* {
      die "Invalid template '$template'"
      }
  }

  # Record the dependencies (remaining args) as comments for inspection.
  echo
  echo '# DEPENDS ON:'
  for dep in "$@" {
    echo "#   $dep"
  }
}

proc write-shwrap {
  ### Create a shell wrapper for a Python tool

  # Key point: if the Python code changes, then the C++ code should be
  # regenerated and re-compiled

  local unused=$1
  local stub_out=$2

  # Write the wrapper and make it executable.
  print-shwrap @ARGV > $stub_out
  chmod +x $stub_out
}

# sourced by devtools/bin.sh
# Dispatch to a function only when this script is run directly, not sourced.
if test $(basename $0) = 'ninja-rules-py.sh' {
  @ARGV
}
    (DONE build/ninja-rules-py.sh)
#!/usr/bin/env bash
#
# Build actions used in the Makefile.
#
# Usage:
#   build/ovm-actions.sh <function name>

set -o nounset
set -o pipefail
set -o errexit
shopt -s strict:all 2>/dev/null || true  # dogfood for OSH

setvar REPO_ROOT = $(cd $(dirname $0)/..; pwd)
readonly REPO_ROOT

source build/common.sh

proc main-name {
  ### Print the C snippet naming the app's main module and bundle filename.
  local python_main=${1:-hello}
  local ovm_bundle_prefix=${2:-hello.ovm}

  cat <<< """ 
char* MAIN_NAME = "$python_main";
#if OVM_DEBUG
  char* OVM_BUNDLE_FILENAME = "${ovm_bundle_prefix}-dbg";
#else
  char* OVM_BUNDLE_FILENAME = "$ovm_bundle_prefix";
#endif
"""
}

proc c-module-toc {
  ### Generate the C module table of contents from within $PY27.
  cd $PY27
  ../build/c_module_toc.py
}

# Modules needed to 'import runpy'.
proc runpy-deps {
  ### Compute both py and c deps for 'import runpy'; extra args pass through.
  $PREPARE_DIR/python -S build/runpy_deps.py both @ARGV
}

proc runpy-py-to-compile {
  ### List only the .py files needed for 'import runpy'.
  $PREPARE_DIR/python -S build/runpy_deps.py py
}

# This version gets the paths out of the repo.  But it requires that we
# build all of Python!
#
# OK yeah so there are a few steps to building minimal app bundles.
# 1. Build all of Python normally.  Normal -D options.
#    ./run.sh build-clang-default
# 2. Then run a special build that is based on that.
#
# Only need a debug build.

# Run  grep -F .so  for the native dependencies.  Have to add those
# somewhere.
proc app-deps {
  ### Discover the py and c dependencies of an app's main module.
  # Fix: removed stray trailing quotes (translation artifacts) after the
  # $prefix assignment and the PYTHONPATH binding.
  local app_name=${1:-hello}
  local pythonpath=${2:-build/testdata}
  local main_module=${3:-hello}

  local prefix=_build/$app_name/app-deps

  PYTHONPATH=$pythonpath \
    $PREPARE_DIR/python -S build/dynamic_deps.py both $main_module $prefix
}

# .py files to compile
proc py-to-compile {
  ### List the .py files a main module transitively imports.
  # Fix: removed stray trailing quotes (translation artifacts) after the
  # $main_module assignment and the PYTHONPATH binding.
  local pythonpath=${1:-build/testdata}
  local main_module=${2:-hello}

  PYTHONPATH=$pythonpath \
    $PREPARE_DIR/python -S build/dynamic_deps.py py $main_module
}

# For embedding in oil/bytecode.zip.
proc help-manifest {
  ### Print a 'source dest' manifest line for every file in the given dir.
  local help_dir=$1
  for entry in $help_dir/* {
    echo "$entry $entry"  # relative path is the same
  }
}

proc ysh-stdlib-manifest {
  ### Print a 'source dest' manifest line for each YSH stdlib module.
  for module in stdlib/*.ysh {
    echo "$module $module"  # relative path is the same
  }
}

proc pyc-version-manifest {
  ### Write a pyc-version.txt next to the manifest and emit its manifest line.
  local manifest_path=${1:-_build/oil/bytecode-opy-manifest.txt}  # For example

  # Just show a string like "bytecode-opy.zip" for now.  There is no OPy
  # version yet.
  local filename=$(basename $manifest_path) 
  local user_str=${filename%-manifest.txt}.zip
  local dir=$(dirname $manifest_path)

  echo $user_str > $dir/pyc-version.txt

  # Put it at the root, like release-date and oil-version.txt.
  echo $dir/pyc-version.txt pyc-version.txt
}

# Make .d file
proc make-dotd {
  ### Emit a Make dependency (.d) stub for the app's OVM targets.
  local app_name=${1:-hello}
  local app_deps_to_compile=${2:-_tmp/hello/app-deps-to-compile.txt}

  # TODO: For each module, look it up in the manifest.
  # I guess make a Python file.

  echo "# TODO $app_deps_to_compile"

  # The dependencies we want.
  # X to prevent screwing things up.
  echo "X_build/$app_name/ovm:"
  echo "X_build/$app_name/ovm-dbg:"
  echo "X_build/$app_name/ovm-cov:"
}

#
# C Code generation.  The three functions below are adapted from
# Modules/makesetup.
#

proc extdecls {
  ### Print 'extern void init<mod>(void);' for each module argument.
  # line_input (readline) is guarded by an #ifdef.
  for mod in "$@" {
    test $mod = line_input && echo "#ifdef HAVE_READLINE"
    echo "extern void init$mod(void);"
    test $mod = line_input && echo "#endif"
  }
  return 0  # because test can fail
}

proc initbits {
  ### Print a module-table entry '{"<mod>", init<mod>},' per module argument.
  # line_input (readline) is guarded by an #ifdef.
  for mod in "$@" {
    test $mod = line_input && echo "#ifdef HAVE_READLINE"
    echo "    {\"$mod\", init$mod},"
    test $mod = line_input && echo "#endif"
  }
  return 0  # because test can fail
}

# Ported from sed to awk.  Awk is MUCH nicer (no $NL ugliness, -v flag, etc.)
proc gen-module-init {
  ### Generate Modules/config.c by splicing decls into the template's markers.
  local extdecls
  setvar extdecls = $(extdecls "$@")
  local initbits
  setvar initbits = $(initbits "$@")

  local template=$PY27/Modules/config.c.in

  # MARKER 1 / MARKER 2 lines in the template are replaced by the generated
  # declarations and table entries; everything else is copied through.
  awk -v template=$template -v extdecls="$extdecls" -v initbits="$initbits" '
    BEGIN {
      print "/* Generated automatically from " template " */"
    }
    /MARKER 1/ {
      print extdecls
      next
    }
    /MARKER 2/ {
      print initbits
      next
    }
    {
      print $0
    }
    ' $template
}

#
# C Modules
#

proc join-modules {
  ### Merge the static and discovered C module lists, deduplicated.
  local static=${1:-static-c-modules.txt}
  local discovered=${2:-_build/oil/all-deps-c.txt}

  # Filter out comments, print the first line.
  #
  # TODO: I don't want to depend on egrep and GNU flags on the target systems?
  # Ship this file I guess.
  egrep --no-filename --only-matching '^[a-zA-Z0-9_\.]+' $static $discovered \
    | sort | uniq
}

@ARGV
    (DONE build/ovm-actions.sh)
#!/usr/bin/env bash
#
# Script for contributors to quickly set up core packages
#
# Usage:
#   build/deps.sh <function name>
#
# Examples:
#   build/deps.sh fetch
#   build/deps.sh install-wedges
#   build/deps.sh rm-oils-crap  # rm /wedge ~/wedge to start over
#
# - re2c
# - cmark
# - python3
# - mypy and deps, so mycpp can import htem

# TODO:
# - remove cmark dependency for help.  It's still used for docs and benchmarks.
# - remove re2c from dev build?  Are there any bugs?  I think it's just slow.
# - add spec-bin so people can always run the tests
#
# - change Contributing page
#   - build/deps.sh fetch-py
#   - build/deps.sh install-wedges-py
#
# mycpp/README.md:
#
#   - build/deps.sh fetch
#   - build/deps.sh install-wedges
#
# Can we make most of them non-root deps?

set -o nounset
set -o pipefail
set -o errexit

source build/dev-shell.sh  # python3 in PATH, PY3_LIBS_VERSION
source deps/from-apt.sh      # PY3_BUILD_DEPS
#source deps/podman.sh
source devtools/run-task.sh  # run-task

# Also in build/dev-shell.sh
setvar USER_WEDGE_DIR = "~/wedge/oils-for-unix.org"

readonly DEPS_SOURCE_DIR=_build/deps-source

readonly RE2C_VERSION=3.0
readonly RE2C_URL="https://github.com/skvadrik/re2c/releases/download/$RE2C_VERSION/re2c-$RE2C_VERSION.tar.xz"

readonly CMARK_VERSION=0.29.0
readonly CMARK_URL="https://github.com/commonmark/cmark/archive/$CMARK_VERSION.tar.gz"

readonly PY2_VERSION=2.7.18
readonly PY2_URL="https://www.python.org/ftp/python/2.7.18/Python-$PY2_VERSION.tar.xz"

readonly PY3_VERSION=3.10.4
readonly PY3_URL="https://www.python.org/ftp/python/3.10.4/Python-$PY3_VERSION.tar.xz"

readonly BASH_VER=4.4  # don't clobber BASH_VERSION
readonly BASH_URL="https://www.oilshell.org/blob/spec-bin/bash-$BASH_VER.tar.gz"

readonly DASH_VERSION=0.5.10.2
readonly DASH_URL="https://www.oilshell.org/blob/spec-bin/dash-$DASH_VERSION.tar.gz"

readonly ZSH_VERSION=5.1.1
readonly ZSH_URL="https://www.oilshell.org/blob/spec-bin/zsh-$ZSH_VERSION.tar.xz"

readonly MKSH_VERSION=R52c
readonly MKSH_URL="https://www.oilshell.org/blob/spec-bin/mksh-$MKSH_VERSION.tgz"

readonly BUSYBOX_VERSION='1.35.0'
readonly BUSYBOX_URL="https://www.oilshell.org/blob/spec-bin/busybox-$BUSYBOX_VERSION.tar.bz2"

readonly YASH_VERSION=2.49
readonly YASH_URL="https://www.oilshell.org/blob/spec-bin/yash-$YASH_VERSION.tar.xz"

readonly MYPY_GIT_URL=https://github.com/python/mypy
readonly MYPY_VERSION=0.780

readonly PY3_LIBS=~/wedge/oils-for-unix.org/pkg/py3-libs/$MYPY_VERSION

# Version 2.4.0 from 2021-10-06 was the last version that supported Python 2
# https://github.com/PyCQA/pyflakes/blob/main/NEWS.rst
readonly PYFLAKES_VERSION=2.4.0
#readonly PYFLAKES_URL='https://files.pythonhosted.org/packages/15/60/c577e54518086e98470e9088278247f4af1d39cb43bcbd731e2c307acd6a/pyflakes-2.4.0.tar.gz'
# 2023-07: Mirrored to avoid network problem on broome during release
readonly PYFLAKES_URL='https://www.oilshell.org/blob/pyflakes-2.4.0.tar.gz'

readonly BLOATY_VERSION=1.1
readonly BLOATY_URL='https://github.com/google/bloaty/releases/download/v1.1/bloaty-1.1.tar.bz2'

readonly UFTRACE_VERSION=0.13
readonly UFTRACE_URL='https://github.com/namhyung/uftrace/archive/refs/tags/v0.13.tar.gz'

proc log {
  ### Write all arguments to stderr, prefixed with the script name.
  echo "$0: $[join(ARGV)]" >&2
}

proc die {
  ### Log an error message and abort the script.
  log @ARGV
  exit 1
}

proc rm-oils-crap {
  ### When you want to start over

  # Remove both the per-user and system-wide wedge trees.
  rm -r -f -v ~/wedge
  sudo rm -r -f -v /wedge
}

# Note: git is an implicit dependency -- that's how we got the repo in the
# first place!

# python2-dev is no longer available on Debian 12
# python-dev also seems gone
#
# wget: for fetching wedges (not on Debian by default!)
# tree: tiny package that's useful for showing what we installed
# g++: essential
# libreadline-dev: needed for the build/prepare.sh Python build.
# gawk: used by spec-runner.sh for the special match() function.
# cmake: for cmark
# PY3_BUILD_DEPS - I think these will be used for building the Python 2 wedge
# as well
readonly -a WEDGE_DEPS_DEBIAN=(
    wget tree g++ gawk libreadline-dev ninja-build cmake
    "${PY3_BUILD_DEPS[@]}"
)

readonly -a WEDGE_DEPS_FEDORA=(

  # Weird, Fedora doesn't have these by default!
  hostname
  tar
  bzip2

  # https://packages.fedoraproject.org/pkgs/wget/wget/
  wget
  # https://packages.fedoraproject.org/pkgs/tree-pkg/tree/
  tree
  gawk

  readline-devel

  # https://packages.fedoraproject.org/pkgs/gcc/gcc/
  gcc gcc-c++

  ninja-build
  cmake

  # Like PY3_BUILD_DEPS
  # https://packages.fedoraproject.org/pkgs/zlib/zlib-devel/
  zlib-devel
  # https://packages.fedoraproject.org/pkgs/libffi/libffi-devel/
  libffi-devel
  # https://packages.fedoraproject.org/pkgs/openssl/openssl-devel/
  openssl-devel

  # For building zsh from source?
  # https://koji.fedoraproject.org/koji/rpminfo?rpmID=36987813
  ncurses-devel
  #libcap-devel

  # still have a job control error compiling bash
  # https://packages.fedoraproject.org/pkgs/glibc/glibc-devel/
  # glibc-devel
)

proc install-ubuntu-packages {
  ### Packages for build/py.sh all, building wedges, etc.

  set -x  # show what needs sudo

  # pass -y for say gitpod
  # Extra args (e.g. -y) are inserted between 'apt' and 'install'.
  sudo apt @ARGV install ${WEDGE_DEPS_DEBIAN[@]}
  set +x

  # maybe pass -y through
  test/spec-bin.sh install-shells-with-apt @ARGV
}

proc wedge-deps-debian {
  # Install packages without prompt
  # Debian and Ubuntu packages are the same
  install-ubuntu-packages -y
}

proc wedge-deps-fedora {
  ### Install the Fedora packages needed to build wedges.
   # https://linuxconfig.org/install-development-tools-on-redhat-8
  sudo dnf group install --assumeyes 'Development Tools'

  sudo dnf install --assumeyes ${WEDGE_DEPS_FEDORA[@]}

}

proc download-to {
  ### Fetch a URL into a directory; skips the download if the file exists.
  local dest_dir=$1
  local src_url=$2
  wget --no-clobber --directory-prefix $dest_dir $src_url
}

proc maybe-extract {
  ### Extract $2 inside $1, unless the expected output dir $3 already exists.
  local wedge_dir=$1
  local tar_name=$2
  local out_dir=$3

  # Idempotence: skip if a previous run already extracted this tarball.
  if test -d "$wedge_dir/$out_dir" {
    log "Not extracting because $wedge_dir/$out_dir exists"
    return
  }

  # Pick the decompression flag from the tarball's extension.
  local tar=$wedge_dir/$tar_name
  case (tar_name) {
    *.gz|*.tgz {  # mksh ends with .tgz
      setvar flag = ''--gzip''
      }
    *.bz2 {
      setvar flag = ''--bzip2''
      }
    *.xz {
      setvar flag = ''--xz''
      }
    * {
      die "tar with unknown extension: $tar_name"
      }
  }

  tar --extract $flag --file $tar --directory $wedge_dir
}

proc clone-mypy {
  ### replaces deps/from-git
  # Shallow-clone the MyPy repo at tag v$version into $dest_dir, if absent.
  local dest_dir=$1
  local version=${2:-$MYPY_VERSION}

  local dest=$dest_dir/mypy-$version
  if test -d $dest {
    log "Not cloning because $dest exists"
    return
  }

  # v$VERSION is a tag, not a branch

  # size optimization: --depth=1 --shallow-submodules
  # https://git-scm.com/docs/git-clone

  git clone --recursive --branch v$version \
    --depth=1 --shallow-submodules \
    $MYPY_GIT_URL $dest

  # TODO: verify commit checksum
}

proc fetch {
  ### Download and extract all dependency sources into $DEPS_SOURCE_DIR.
  # If $1 is non-empty, stop after the deps needed for build/py.sh.
  local py_only=${1:-}

  # For now, simulate what 'medo expand deps/source.medo _build/deps-source'
  # would do: fetch compressed tarballs designated by .treeptr files, and
  # expand them.

  # _build/deps-source/
  #   re2c/
  #     WEDGE
  #     re2c-3.0/  # expanded .tar.xz file

  mkdir -p $DEPS_SOURCE_DIR

  # Copy the whole tree, including the .treeptr files
  cp --verbose --recursive --no-target-directory \
    deps/source.medo/ $DEPS_SOURCE_DIR/

  download-to $DEPS_SOURCE_DIR/re2c $RE2C_URL
  download-to $DEPS_SOURCE_DIR/cmark $CMARK_URL
  maybe-extract $DEPS_SOURCE_DIR/re2c $(basename $RE2C_URL) re2c-$RE2C_VERSION
  maybe-extract $DEPS_SOURCE_DIR/cmark $(basename $CMARK_URL) cmark-$CMARK_VERSION

  if test -n $py_only {
    log "Fetched dependencies for 'build/py.sh'"
    return
  }
 
  download-to $DEPS_SOURCE_DIR/pyflakes $PYFLAKES_URL
  maybe-extract $DEPS_SOURCE_DIR/pyflakes $(basename $PYFLAKES_URL) \
    pyflakes-$PYFLAKES_VERSION

  download-to $DEPS_SOURCE_DIR/python2 $PY2_URL
  download-to $DEPS_SOURCE_DIR/python3 $PY3_URL
  maybe-extract $DEPS_SOURCE_DIR/python2 $(basename $PY2_URL) Python-$PY2_VERSION
  maybe-extract $DEPS_SOURCE_DIR/python3 $(basename $PY3_URL) Python-$PY3_VERSION

  download-to $DEPS_SOURCE_DIR/bash $BASH_URL
  # Fix: out dir was 'dash-$BASH_VER', which defeated maybe-extract's
  # already-extracted check for bash.
  maybe-extract $DEPS_SOURCE_DIR/bash $(basename $BASH_URL) bash-$BASH_VER

  download-to $DEPS_SOURCE_DIR/dash $DASH_URL
  maybe-extract $DEPS_SOURCE_DIR/dash $(basename $DASH_URL) dash-$DASH_VERSION

  download-to $DEPS_SOURCE_DIR/zsh $ZSH_URL
  maybe-extract $DEPS_SOURCE_DIR/zsh $(basename $ZSH_URL) zsh-$ZSH_VERSION

  download-to $DEPS_SOURCE_DIR/mksh $MKSH_URL
  maybe-extract $DEPS_SOURCE_DIR/mksh $(basename $MKSH_URL) mksh-$MKSH_VERSION

  download-to $DEPS_SOURCE_DIR/busybox $BUSYBOX_URL
  maybe-extract $DEPS_SOURCE_DIR/busybox $(basename $BUSYBOX_URL) busybox-$BUSYBOX_VERSION

  download-to $DEPS_SOURCE_DIR/yash $YASH_URL
  # Fix: out dir used $DASH_VERSION (copy-paste error); yash has its own version.
  maybe-extract $DEPS_SOURCE_DIR/yash $(basename $YASH_URL) yash-$YASH_VERSION

  # Patch: this tarball doesn't follow the convention $name-$version
  if test -d $DEPS_SOURCE_DIR/mksh/mksh {
    pushd $DEPS_SOURCE_DIR/mksh
    mv -v mksh mksh-$MKSH_VERSION
    popd
  }

  # bloaty and uftrace are for benchmarks, in containers
  download-to $DEPS_SOURCE_DIR/bloaty $BLOATY_URL
  download-to $DEPS_SOURCE_DIR/uftrace $UFTRACE_URL
  # Fix: the bloaty/uftrace out-dir names were swapped.
  maybe-extract $DEPS_SOURCE_DIR/bloaty $(basename $BLOATY_URL) bloaty-$BLOATY_VERSION
  maybe-extract $DEPS_SOURCE_DIR/uftrace $(basename $UFTRACE_URL) uftrace-$UFTRACE_VERSION

  # This is in $DEPS_SOURCE_DIR to COPY into containers, which mycpp will directly import.
  # It's also copied into a wedge in install-wedges.
  clone-mypy $DEPS_SOURCE_DIR/mypy

  if command -v tree > /dev/null {
    tree -L 2 $DEPS_SOURCE_DIR
  }
}

proc mirror-pyflakes {
  ### Workaround for network error during release
  # Upload the cached pyflakes tarball to our own blob mirror.
  scp \
    $DEPS_SOURCE_DIR/pyflakes/"$(basename $PYFLAKES_URL)" \
    oilshell.org:oilshell.org/blob/
}

proc fetch-py {
  ### Fetch only the deps needed for build/py.sh.
  fetch py_only
}

proc mypy-new {
  ### Clone a newer MyPy and copy it into the user wedge dir.
  local version=0.971
  # Do the latest version for Python 2
  clone-mypy $DEPS_SOURCE_DIR/mypy $version

  local dest_dir=$USER_WEDGE_DIR/pkg/mypy/$version
  mkdir -p $dest_dir

  cp --verbose --recursive --no-target-directory \
    $DEPS_SOURCE_DIR/mypy/mypy-$version $dest_dir
}

proc wedge-exists {
  ### Test whether wedge $1 at version $2 is installed; $3='' means /wedge.
  # Returns 0 (and logs) when the install dir exists, 1 otherwise.
  local is_relative=${3:-yes}

  if test -n $is_relative {
    local installed=~/wedge/oils-for-unix.org/pkg/$1/$2
  } else {
    local installed=/wedge/oils-for-unix.org/pkg/$1/$2
  }

  if test -d $installed {
    log "$installed already exists"
    return 0
  } else {
    return 1
  }
}

# TODO: py3-libs needs to be a WEDGE, so that that you can run
# 'wedge build deps/source.medo/py3-libs/' and then get it in
#
# _build/wedge/{absolute,relative}   # which one?
#
# It needs a BUILD DEPENDENCY on:
# - the python3 wedge, so you can do python3 -m pip install.
# - the mypy repo, which has test-requirements.txt

proc download-py3-libs {
  ### Download source/binary packages, AFTER python3 is installed

  # Note that this is NOT source code; there is binary code, e.g.  in
  # lxml-*.whl

  local mypy_dir=${1:-$DEPS_SOURCE_DIR/mypy/mypy-$MYPY_VERSION}
  local py_package_dir=_cache/py3-libs
  mkdir -p $py_package_dir

  # Avoids a warning, but doesn't fix typed_ast
  #python3 -m pip download -d $py_package_dir wheel

  # Cache MyPy's test requirements plus pexpect locally.
  python3 -m pip download -d $py_package_dir -r $mypy_dir/test-requirements.txt
  python3 -m pip download -d $py_package_dir pexpect
}

proc get-typed-ast-patch {
  ### Download the upstream typed_ast fix as a local patch file.
  curl -o deps/typed_ast.patch https://github.com/python/typed_ast/commit/123286721923ae8f3885dbfbad94d6ca940d5c96.patch
}

# Work around typed_ast bug:
#   https://github.com/python/typed_ast/issues/169
#
# Apply this patch
# https://github.com/python/typed_ast/commit/123286721923ae8f3885dbfbad94d6ca940d5c96
#
# typed_ast is tarred up though
proc patch-typed-ast {
  ### Re-create the cached typed_ast tarball with the upstream patch applied.
  local package_dir=_cache/py3-libs
  local patch=$PWD/deps/typed_ast.patch

  pushd $package_dir
  cat $patch
  echo

  local dir=typed_ast-1.4.3
  local tar=typed_ast-1.4.3.tar.gz

  echo OLD
  ls -l $tar
  echo

  # Unpack the original tarball fresh.
  rm -r -f -v $dir
  tar -x -z < $tar

  # Apply the patch inside the unpacked tree.
  pushd $dir
  patch -p1 < $patch
  popd
  #find $dir

  # Create a new one
  tar --create --gzip --file $tar typed_ast-1.4.3

  echo NEW
  ls -l $tar
  echo

  popd
}

proc install-py3-libs-in-venv {
  ### Inside the given venv, install MyPy test deps and pexpect from the cache.
  local venv_dir=$1
  local mypy_dir=$2  # This is a param for host build vs. container build
  local package_dir=_cache/py3-libs

  source $venv_dir/bin/activate  # enter virtualenv

  # 2023-07 note: we're installing yapf in a DIFFERENT venv, because it
  # conflicts with MyPy deps!
  # "ERROR: pip's dependency resolver does not currently take into account all
  # the packages that are installed."

  # --find-links uses a "cache dir" for packages (weird flag name!)

  # Avoids a warning, but doesn't fix typed_ast
  #time python3 -m pip install --find-links $package_dir wheel

  # for mycpp/
  time python3 -m pip install --find-links $package_dir -r $mypy_dir/test-requirements.txt

  # pexpect: for spec/stateful/*.py
  time python3 -m pip install --find-links $package_dir pexpect
}

proc install-py3-libs {
  ### Create the py3-libs venv wedge and install MyPy deps into it.
  local mypy_dir=${1:-$DEPS_SOURCE_DIR/mypy/mypy-$MYPY_VERSION}

  # Guard: require the wedge-installed python3, not a system one.
  local py3
  setvar py3 = $(command -v python3)
  case (py3) {
    *wedge/oils-for-unix.org/* {
      }
    * {
      die "python3 is '$py3', but expected it to be in a wedge"
      }
  }

  # Fix: message had an unbalanced '(' — closing paren was missing.
  log "Ensuring pip is installed (interpreter $(command -v python3))"
  python3 -m ensurepip

  local venv_dir=$USER_WEDGE_DIR/pkg/py3-libs/$PY3_LIBS_VERSION
  log "Creating venv in $venv_dir"

  # Note: the bin/python3 in this venv is a symlink to python3 in $PATH, i.e.
  # the /wedge we just built
  python3 -m venv $venv_dir

  log "Installing MyPy deps in venv"

  # Run in a subshell because it mutates shell state
  $0 install-py3-libs-in-venv $venv_dir $mypy_dir
}

proc install-spec-bin {
  ### Build wedges for the shells used by the spec tests, skipping existing ones.
  if ! wedge-exists dash $DASH_VERSION relative {
    deps/wedge.sh unboxed-build _build/deps-source/dash
  }

  if ! wedge-exists mksh $MKSH_VERSION relative {
    deps/wedge.sh unboxed-build _build/deps-source/mksh
  }

  if ! wedge-exists busybox $BUSYBOX_VERSION relative {
    deps/wedge.sh unboxed-build _build/deps-source/busybox
  }

  #return

  # Compile Error on Fedora - count_all_jobs
  if ! wedge-exists bash $BASH_VER relative {
    deps/wedge.sh unboxed-build _build/deps-source/bash
  }

  # zsh ./configure is NOT detecting 'boolcodes', and then it has a broken
  # fallback in Src/Modules/termcap.c that causes a compile error!  It seems
  # like ncurses-devel should fix this, but it doesn't
  #
  # https://koji.fedoraproject.org/koji/rpminfo?rpmID=36987813
  #
  # from /home/build/oil/_build/deps-source/zsh/zsh-5.1.1/Src/Modules/termcap.c:38:
  # /usr/include/term.h:783:56: note: previous declaration of ‘boolcodes’ with type ‘const char * const[]’
  # 783 | extern NCURSES_EXPORT_VAR(NCURSES_CONST char * const ) boolcodes[];
  #
  # I think the ./configure is out of sync with the actual build?

  # Note: '' makes this an absolute (/wedge) check, unlike the others.
  if ! wedge-exists zsh $ZSH_VERSION '' {
    deps/wedge.sh unboxed-build _build/deps-source/zsh
  }

  return

  # Hm this has problem with out-of-tree build?  I think Oils does too actually
  if ! wedge-exists yash $YASH_VERSION relative {
    deps/wedge.sh unboxed-build _build/deps-source/yash
  }
}

proc install-wedges {
  ### Build/copy all wedges; with a non-empty $1, stop after py.sh deps.
  local py_only=${1:-}

  # TODO:
  # - Make all of these RELATIVE wedges
  # - Add
  #   - unboxed-rel-smoke-test -- move it inside container
  #   - rel-smoke-test -- mount it in a different location
  # - Should have a CI task that does all of this!

  # Consistency fix: use the $CMARK_VERSION / $RE2C_VERSION constants (same
  # values as the previous hard-coded 0.29.0 / 3.0) so a version bump at the
  # top of the file takes effect here too.
  if ! wedge-exists cmark $CMARK_VERSION {
    deps/wedge.sh unboxed-build _build/deps-source/cmark/
  }

  if ! wedge-exists re2c $RE2C_VERSION {
    deps/wedge.sh unboxed-build _build/deps-source/re2c/
  }

  if ! wedge-exists python2 $PY2_VERSION {
    deps/wedge.sh unboxed-build _build/deps-source/python2/
  }

  if test -n $py_only {
    log "Installed dependencies for 'build/py.sh'"
    return
  }

  # Just copy this source tarball
  if ! wedge-exists pyflakes $PYFLAKES_VERSION {
    local dest_dir=$USER_WEDGE_DIR/pkg/pyflakes/$PYFLAKES_VERSION
    mkdir -p $dest_dir

    cp --verbose --recursive --no-target-directory \
      $DEPS_SOURCE_DIR/pyflakes/pyflakes-$PYFLAKES_VERSION $dest_dir
  }

  # TODO: make the Python build faster by using all your cores?
  if ! wedge-exists python3 $PY3_VERSION {
    deps/wedge.sh unboxed-build _build/deps-source/python3/
  }

  # Copy all the contents, except for .git folder.
  if ! wedge-exists mypy $MYPY_VERSION {

    # NOTE: We have to also copy the .git dir, because it has
    # .git/modules/typeshed

    local dest_dir=$USER_WEDGE_DIR/pkg/mypy/$MYPY_VERSION
    mkdir -p $dest_dir

    # Note: pack files in .git/modules/typeshed/objects/pack are read-only
    # this can fail
    cp --verbose --recursive --no-target-directory \
      $DEPS_SOURCE_DIR/mypy/mypy-$MYPY_VERSION $dest_dir
  }

  if ! wedge-exists py3-libs $PY3_LIBS_VERSION {
    download-py3-libs
    patch-typed-ast
    install-py3-libs
  }

  if command -v tree > /dev/null {
    tree -L 3 $USER_WEDGE_DIR
    echo
    tree -L 3 /wedge/oils-for-unix.org
  }
}

# Host wedges end up in ~/wedge
proc uftrace-host {
  ### built on demand; run $0 first

  # BUG: doesn't detect python3
  # WEDGE tells me that it depends on pkg-config
  # 'apt-get install pkgconf' gets it
  # TODO: Should use python3 WEDGE instead of SYSTEM python3?

  deps/wedge.sh unboxed-build _build/deps-source/uftrace
}

proc R-libs-host {
  ### Build the R-libs wedge on the host.
  deps/wedge.sh unboxed-build _build/deps-source/R-libs
}

proc bloaty-host {
  ### Build the bloaty wedge on the host.
  deps/wedge.sh unboxed-build _build/deps-source/bloaty
}

proc install-wedges-py {
  ### Install only the wedges needed by build/py.sh.
  install-wedges py_only
}

proc container-wedges {
  ### Build wedges that are copied into containers, not run on host
  
  # These end up in _build/wedge/binary

  #export-podman

  if true {
    deps/wedge.sh build deps/source.medo/time-helper
    deps/wedge.sh build deps/source.medo/cmark/
    deps/wedge.sh build deps/source.medo/re2c/
    deps/wedge.sh build deps/source.medo/python3/
  }

  # Disabled sets below are kept for manual toggling.
  if false {
    deps/wedge.sh build deps/source.medo/bloaty/
    deps/wedge.sh build deps/source.medo/uftrace/
  }

  if false {
    # For soil-benchmarks/ images
    deps/wedge.sh build deps/source.medo/R-libs/
  }

}

proc commas {
  ### Filter: insert thousands separators into numbers on stdin.
  # Wow I didn't know this :a trick
  #
  # OK this is a label and a loop, which makes sense.  You can't do it with
  # pure regex.
  #
  # https://shallowsky.com/blog/linux/cmdline/sed-improve-comma-insertion.html
  # https://shallowsky.com/blog/linux/cmdline/sed-improve-comma-insertion.html
  sed ':a;s/\b\([0-9]\+\)\([0-9]\{3\}\)\b/\1,\2/;ta'   
}

proc wedge-sizes {
  ### Print per-wedge disk usage plus a total, with thousands separators.
  # Sizes
  # printf justifies du output

  local tmp=_tmp/wedge-sizes.txt
  du -s --bytes /wedge/*/*/* ~/wedge/*/*/* | awk '
    { print $0  # print the line
      total_bytes += $1  # accumulate
    }
END { print total_bytes " TOTAL" }
' > $tmp
  
  # Two columns per line: size and path.
  cat $tmp | commas | xargs -n 2 printf '%15s  %s\n'
  echo

  #du -s --si /wedge/*/*/* ~/wedge/*/*/* 
  #echo
}

proc wedge-report {
  ### Summarize installed wedges: tree, sizes, biggest files, extension stats.
  # 4 levels deep shows the package
  if command -v tree > /dev/null {
    tree -L 4 /wedge ~/wedge
    echo
  }

  wedge-sizes

  local tmp=_tmp/wedge-manifest.txt

  echo 'Biggest files'
  find /wedge ~/wedge -type f -a -printf '%10s %P\n' > $tmp

  set +o errexit  # ignore SIGPIPE
  sort -n --reverse $tmp | head -n 20 | commas
  set -o errexit

  echo

  # Show the most common file extensions
  #
  # I feel like we should be able to get rid of .a files?  That's 92 MB, second
  # most common
  #
  # There are also duplicate .a files for Python -- should look at how distros
  # get rid of those

  cat $tmp | python3 -c '
import os, sys, collections

bytes = collections.Counter()
files = collections.Counter()

for line in sys.stdin:
  size, path = line.split(None, 1)
  path = path.strip()  # remove newline
  _, ext = os.path.splitext(path)
  size = int(size)

  bytes[ext] += size
  files[ext] += 1

#print(bytes)
#print(files)

n = 20

print("Most common file types")
for ext, count in files.most_common()[:n]:
  print("%10d  %s" % (count, ext))

print()

print("Total bytes by file type")
for ext, total_bytes in bytes.most_common()[:n]:
  print("%10d  %s" % (total_bytes, ext))
' | commas

}

run-task @ARGV
    (DONE build/deps.sh)
#!/usr/bin/env bash
#
# Usage:
#   build/old-ovm-test.sh <function name>

set -o nounset
set -o pipefail
set -o errexit
shopt -s strict:all 2>/dev/null || true  # dogfood for OSH

proc test-oil-bundle {
  ### Build the oil.ovm bundle and smoke-test both entry names.
  make _bin/oil.ovm
  _bin/oil.ovm osh -c 'echo hi'
  # The bundle dispatches on argv[0], so an 'osh' symlink runs osh.
  ln -s -f oil.ovm _bin/osh
  _bin/osh -c 'echo hi from osh'
}

# Test the different entry points.
proc ovm-main-func {
  ### Exercise the debug OVM binary with no args, a zip, and a .pyc file.
  # Fix: removed stray single quotes (translation artifacts) that were glued
  # onto the $ovm assignment, the echo separators, and each env-prefix run.
  echo ---
  echo 'Running nothing'
  echo ---
  local ovm=_build/hello/ovm-dbg

  # Expected to fail without an argument; tolerate the error.
  _OVM_RUN_SELF=0 $ovm || true

  echo ---
  echo 'Running bytecode.zip'
  echo ---

  _OVM_RUN_SELF=0 $ovm _build/hello/bytecode.zip || true

  # Doesn't work because of stdlib deps?
  echo ---
  echo 'Running lib.pyc'
  echo ---

  _OVM_RUN_SELF=0 $ovm build/testdata/lib.pyc

}

@ARGV
    (DONE build/old-ovm-test.sh)
#!/usr/bin/env bash
#
# Usage:
#   build/doc.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

# https://oilshell.org/release/$VERSION/
#  doc/
#    index.html
#    INSTALL.html

# Version string from the first line of oil-version.txt
readonly OIL_VERSION=$(head -n 1 oil-version.txt)
export OIL_VERSION  # for quick_ref.py

# Absolute dir containing this script, and the repo root one level up
setvar THIS_DIR = $(readlink -f $(dirname $0))
readonly THIS_DIR
setvar REPO_ROOT = $(cd $THIS_DIR/.. && pwd)
readonly REPO_ROOT


proc log {
  ### Print a message to stderr.
  echo @ARGV 1>&2
}

#
# Deps (similar to doctools/cmark.sh and build/codegen.sh)
#

readonly MANDOC_DIR='_deps/mdocml-1.14.1'

proc download-mandoc {
  ### Download the mandoc tarball into _deps (--no-clobber skips if present).
  mkdir -p _deps
  wget --no-clobber --directory _deps \
    https://mandoc.bsd.lv/snapshots/mdocml-1.14.1.tar.gz
}

proc build-mandoc {
  ### Configure and build mandoc inside $MANDOC_DIR.
  # NOTE(review): this cd is not restored, so the caller's working directory
  # changes -- confirm callers tolerate that.
  cd $MANDOC_DIR
  ./configure
  make
}

proc mandoc {
  ### Run the locally built mandoc binary.
  $MANDOC_DIR/mandoc @ARGV
}

proc _build-timestamp {
  ### Emit an HTML footer fragment with the generation time.
  echo '<hr/>'
  echo "<i>Generated on $(date)</i>"
}

# Places version is used
#
# - in --version
# - in URL for every page?  inside the binary
# - in titles for index, install, osh-quick-ref TOC, etc.
# - in deployment script

# Run with environment variable
proc help-gen {
  ### Run doctools/help_gen.py with the repo root on PYTHONPATH.
  # (Fixed stray quotes from a lossy translation that put the PYTHONPATH
  # binding inside string literals.)
  env PYTHONPATH=. doctools/help_gen.py @ARGV
}

proc cmark {
  ### Run doctools/cmark.py with the repo root on PYTHONPATH.
  # h2 and h3 are shown in TOC.  The blog uses "legacy" h3 and h4.
  # (Fixed stray quotes from a lossy translation that put the PYTHONPATH
  # binding inside string literals.)
  env PYTHONPATH=. doctools/cmark.py --toc-tag h2 --toc-tag h3 --toc-pretty-href @ARGV
}

# Docs rendered by all-markdown: each entry is a path under doc/ without
# the .md extension.
readonly MARKDOWN_DOCS=(
  # Help index has its own rendering

  # polished
  getting-started
  known-differences
  error-handling
  json
  hay
  simple-word-eval
  quirks
  warts

  eggex
  ysh-regex-api
  upgrade-breakage
  ysh-tour

  style-guide
  novelties

  proc-func
  block-literals

  # Data language
  qsn
  qtt
  j8-notation

  doc-toolchain
  doc-plugins
  idioms
  shell-idioms
  ysh-faq

  language-influences
  ysh-vs-python
  ysh-vs-shell

  syntactic-concepts
  syntax-feelings
  command-vs-expression-mode

  # needs polish
  # Note: docs about the Oil language are prefixed 'oil-'.
  # data-model and command-vs-expression-mode span both OSH and Oil.

  index
  faq-doc

  options

  old/index
  old/project-tour
  old/legacy-array
  old/ysh-keywords
  old/modules
  old/expression-language
  old/word-language
  old/errors
  old/ysh-builtins

  io-builtins
  unicode
  framing
  xtrace
  headless
  completion
  strings
  variables

  # Internal stuff
  interpreter-state
  process-model
  architecture-notes
  parser-architecture
)

# Bug fix: Plain $(date) can output unicode characters (e.g. in Japanese
# locale), which is loaded by Python into say u'\u5e74'.  But the default
# encoding in Python 2 is still 'ascii', which means that '%s' % u_str may
# fail.
#
# I believe --rfc-e-mail should never output a Unicode character.
#
# A better fix would be to implement json_utf8.load(f), which doesn't decode
# into unicode instances.  This would remove useless conversions.

# Build timestamp embedded in every rendered page
readonly TIMESTAMP=$(date --rfc-email)

proc split-and-render {
  ### Split front matter from a Markdown doc, then render it to HTML.
  local src=${1:-doc/known-differences.md}

  local rel_path=${src%'.md'}  # doc/known-differences
  local tmp_prefix=_tmp/$rel_path  # temp dir for splitting

  local out=${2:-_release/VERSION/$rel_path.html}
  local web_url=${3:-'../web'}

  mkdir -v -p $(dirname $out) $tmp_prefix

  # Also add could add css_files.  The one in the file takes precedence always?

  # css_files: a space-separated list
  # all_docs_url: so we link from doc/foo.html -> doc/

  local css_files="$web_url/base.css $web_url/manual.css $web_url/toc.css $web_url/language.css $web_url/code.css"

  # Writes ${tmp_prefix}_meta.json and ${tmp_prefix}_content.md
  doctools/split_doc.py \
    -v build_timestamp="$TIMESTAMP" \
    -v oil_version="$OIL_VERSION" \
    -v css_files="$css_files" \
    -v all_docs_url='.' \
    -v repo_url="$src" \
    $src $tmp_prefix

  #ls -l _tmp/doc
  #head _tmp/doc/*
  #return

  # for ysh-tour code blocks
  local code_out=_tmp/code-blocks/$rel_path.txt
  mkdir -v -p $(dirname $code_out)

  cmark \
    --code-block-output $code_out \
    ${tmp_prefix}_meta.json ${tmp_prefix}_content.md > $out

  log "$tmp_prefix -> (doctools/cmark) -> $out"
}

# Special case for README
# Do NOT split because we don't want front matter in the markdown source.
proc render-only {
  ### Render one Markdown/text file without splitting front matter.
  local src=${1:-README.md}
  local css_files=${2:-'../web/manual.css ../web/toc.css'}
  local title=${3:-'Oil Source Code'}

  # Derive the doc name by stripping the extension.
  local name
  case (src) { 
    *.md {
      setvar name = $(basename $src .md)
      }
    *.txt {
      setvar name = $(basename $src .txt)
      }
    * {
      setvar name = $(basename $src)
      }
  }

  local prefix=_tmp/doc/$name
  local out=_release/VERSION/doc/$name.html

  # Write the metadata JSON that cmark expects, since we didn't split it
  # from front matter.
  local meta=${prefix}_meta.json 
  cat >$meta <<< """
{ "title": "$title",
  "repo_url": "$src",
  "css_files": "$css_files",
  "all_docs_url": ".",

  "build_timestamp": "$TIMESTAMP",
  "oil_version": "$OIL_VERSION"
}
"""

  cmark $meta $src > $out
  log "Wrote $out"
}

proc special {
  ### Render docs that don't go through the normal all-markdown loop.
  render-only 'README.md' '../web/base.css ../web/manual.css ../web/toc.css' 'Oil Source Code'
  render-only 'INSTALL.txt' '../web/base.css ../web/install.css' 'Installing Oil'

  # These pages aren't in doc/
  split-and-render doc/release-index.md _tmp/release-index.html
  split-and-render doc/release-quality.md _tmp/release-quality.html
}

proc all-markdown {
  ### Render every doc in MARKDOWN_DOCS, plus the special cases.
  make-dirs

  # TODO: We can set repo_url here!  Then we don't need it for most docs.
  # split_doc.py can return {} if the doc doesn't start with ---

  #for d in doc/index.md doc/known-differences.md doc/*-manual.md \
  #  doc/eggex.md doc/oil-options.md doc/oil-func-proc-block.md; do
  for d in "${MARKDOWN_DOCS[@]}" {
    split-and-render doc/$d.md
  }

  special
}

proc redir-body {
  ### Emit an HTML page that redirects to $1 (no escaping is done).
  local to_url=$1  # WARNING: no escaping
  cat <<< """
<head>
  <meta http-equiv="Refresh" content="0; URL=$to_url" />
</head>
"""
}

proc redirect-pairs {
  ### Print "old new" doc name pairs, one per line.
  # we want want /release/latest/ URLs to still work
  cat <<< """
oil-language-tour ysh-tour
oil-language-faq ysh-faq
oil-help ysh-help
oil-help-topics ysh-help-topics
"""
}

proc all-redirects {
  ### Write a redirect page for every renamed doc.
  redirect-pairs | while read -r from_page to_page {
    redir-body "$to_page.html" | tee "_release/VERSION/doc/$from_page.html"
  }
}

# TODO: This could use some CSS.
proc man-page {
  ### Render the osh man page as HTML into the release tree.
  local root_dir=${1:-_release/VERSION}
  local out=$root_dir/osh.1.html

  mandoc -T html doc/osh.1 > $out
  ls -l $root_dir
}

# I want to ship the INSTALL file literally, so just mutate things
proc _sed-ext {
  ### sed with extended regexps, editing files in place.
  sed --regexp-extended -i @ARGV
}

proc update-src-versions {
  ### Rewrite version numbers and /release/ URLs to $OIL_VERSION.
  _sed-ext \
    "s/[0-9]+\.[0-9]+\.[a-z0-9]+/$OIL_VERSION/g" \
    doc/release-*.md

  # we need to update tarball paths, /release/0.8.4/ URL, etc.
  _sed-ext \
    "s/[0-9]+\.[0-9]+\.[a-z0-9]+/$OIL_VERSION/g" INSTALL.txt

  _sed-ext \
    "s;/release/[0-9]+\.[0-9]+\.[a-z0-9]+/;/release/$OIL_VERSION/;g" \
    doc/osh.1
}

proc oil-grammar {
  ### Run ysh/cmd_parse.py with the repo root on PYTHONPATH.
  # (Fixed stray quotes from a lossy translation that put the PYTHONPATH
  # binding inside string literals.)
  env PYTHONPATH=. ysh/cmd_parse.py @ARGV
}

#
# Test Tools
#

proc split-doc-demo {
  ### Demo: split front matter from a small test doc.
  cat > _tmp/testdoc.md <<< """
---
title: foo
---

Title
=====

hello

"""

  doctools/split_doc.py _tmp/testdoc.md _tmp/testdoc

  head _tmp/testdoc*
}

#
# Help is both markdown and text
#

# Output/input dirs shared by the help-card targets below
readonly TMP_DIR=_tmp/doc
readonly CODE_BLOCK_DIR=_tmp/code-blocks
readonly TEXT_DIR=_devbuild/help
readonly HTML_DIR=_release/VERSION
readonly CODE_DIR=_devbuild/gen

proc cards-from-indices {
  ### Make help cards

  for lang in osh ysh data {
    help-gen cards-from-index $lang $TEXT_DIR \
      < $HTML_DIR/doc/ref/toc-$lang.html
  }
}

proc cards-from-chapters {
  ### Turn h3 topics into cards

  local py_out=$CODE_DIR/help_meta.py

  mkdir -p _gen/frontend
  local cc_prefix=_gen/frontend/help_meta

  help-gen cards-from-chapters $TEXT_DIR $py_out $cc_prefix \
    $HTML_DIR/doc/ref/chap-*.html
}

proc ref-check {
  ### Check indexes and chapters against each other

  help-gen ref-check \
    doc/ref/toc-*.md \
    _release/VERSION/doc/ref/chap-*.html
}

proc tour {
  ### Build the Tour of YSH, and execute code as validation
  local name=${1:-ysh-tour}

  split-and-render doc/$name.md

  local work_dir=$REPO_ROOT/_tmp/code-blocks/doc

  mkdir -p $work_dir/lib

  # Files used by module example
  touch $work_dir/{build,test}.sh

  cat >$work_dir/lib/util.ysh <<< """
log() { echo "$[join(ARGV)]" 1>&2; }
"""

  # Run the extracted code blocks to validate the doc.
  pushd $work_dir
  $REPO_ROOT/bin/ysh $name.txt
  popd

  # My own dev tools
  if test -d ~/vm-shared {
    local path=_release/VERSION/doc/$name.html
    cp -v $path ~/vm-shared/$path
  }
}

proc one {
  ### Iterate on one doc quickly

  local name=${1:-options}

  split-and-render doc/$name.md

  # Make sure the doc has valid YSH code?
  # TODO: Maybe need an attribute for OSH or YSH
  pushd _tmp/code-blocks/doc
  $REPO_ROOT/bin/ysh $name.txt
  popd

  if test -d ~/vm-shared {
    local out="${name%.md}.html"
    local path=_release/VERSION/$out
    cp -v $path ~/vm-shared/$path
  }
}

proc make-dirs {
  ### Create output dirs used by the doc build.
  mkdir -p $TMP_DIR $CODE_BLOCK_DIR $TEXT_DIR $HTML_DIR/doc
}

proc one-ref {
  ### Render a single doc/ref page.
  local md=${1:-doc/ref/index.md}
  split-and-render $md '' '../../web'
}

proc all-ref {
  ### Build doc/ref in text and HTML.  Depends on libcmark.so

  log "Removing $TEXT_DIR/*"
  rm -f $TEXT_DIR/*
  make-dirs

  # Make the indexes and chapters
  for d in doc/ref/*.md {
    split-and-render $d '' '../../web'
  }

  # Text cards
  cards-from-indices
  # A few text cards, and HELP_TOPICS dict for URLs, for flat namespace
  cards-from-chapters

  if command -v pysum {
    # 19 KB of embedded help, seems OK.  Biggest card is 'ysh-option'.  Could
    # compress it.
    echo 'Size of embedded help:'
    ls -l $TEXT_DIR | tee /dev/stderr | awk '{print $5}' | pysum
  }

  # Better sorting
  #LANG=C ls -l $TEXT_DIR
}

proc _copy-path {
  ### Copy one file, creating the destination directory first.
  local src=$1
  local dest=$2

  mkdir -p $(dirname $dest)
  cp -v $src $dest
}

proc copy-web {
  ### Copy web/*.css and *.js into the release tree, pruning web/_tmp.
  # find prints "src dest" pairs; xargs feeds each pair to _copy-path,
  # re-invoking this script ($0) so the proc is visible.
  find web \
    '(' -name _tmp -a -prune ')' -o \
    '(' -name '*.css' -o -name '*.js' ')' -a -printf '%p _release/VERSION/%p\n' |
  xargs -n 2 -- $0 _copy-path
}

proc pretty-size {
  ### Print the size of a file in bytes, with thousands separators.
  local path=$1
  stat --format '%s' $path | python -c '
import sys
num_bytes = int(sys.stdin.read())
print "{:,}".format(num_bytes)
'
}

# NOTE: It might be better to link to files like this in the /release/ tree.
# Although I am not signing them.

# https://nodejs.org/dist/v8.11.4/SHASUMS256.txt.asc

proc tarball-links-row-html {
  ### Emit HTML table rows linking to release tarballs, with checksums.
  local version=$1

  cat <<< """
<tr class="file-table-heading">
  <td></td>
  <td>File / SHA256 checksum</td>
  <td class="size">Size</td>
  <td></td>
</tr>
"""

  # we switched to .gz for oils for Unix
  for name in oil-$version.tar.{gz,xz} \
    oils-for-unix-$version.tar.{gz,xz} \
    oil-native-$version.tar.xz {

    local url="/download/$name"  # The server URL
    local path="../oilshell.org__deploy/download/$name"

    # Don't show tarballs that don't exist
    if [[ $name == oils-for-unix-* && ! -f $path ]] {
      continue
    }
    if [[ $name == oil-native-* && ! -f $path ]] {
      continue
    }

    local checksum
    setvar checksum = $(sha256sum $path | awk '{print $1}')
    local size
    setvar size = $(pretty-size $path)

    # TODO: Port this to oil with "commas" extension.

    # Three columns: date, version, and links
    cat <<< """
    <tr> 
      <td></td>
      <td class="filename"><a href="$url">$name</a></td>
      <td class="size">$size</td>
    </tr>
    <tr>
      <td></td>
      <td colspan=2 class="checksum">$checksum</td>
    </tr>
"""
  }
}

proc this-release-links {
  ### Emit the download-links table for the current version.
  echo '<div class="file-table">'
  echo '<table>'
  tarball-links-row-html $OIL_VERSION
  echo '</table>'
  echo '</div>'
}

# Turn HTML comment into a download link
proc add-date-and-links {
  ### Filter stdin, replacing placeholder HTML comments with real content.
  local snippet
  setvar snippet = $(this-release-links)

  awk -v date=$1 -v snippet="$snippet" '
    /<!-- REPLACE_WITH_DOWNLOAD_LINKS -->/ {
      print(snippet)
      next
    }

    /<!-- REPLACE_WITH_DATE -->/ {
      print(date)
      next
    }

    # Everything else
    { print }
  '
}

proc modify-pages {
  ### Fill in date and download links on the release index pages.
  local release_date
  setvar release_date = $(cat _build/release-date.txt)

  local root=_release/VERSION

  add-date-and-links $release_date < _tmp/release-index.html > $root/index.html
  add-date-and-links $release_date < _tmp/release-quality.html > $root/quality.html
}

proc run-for-release {
  ### Build a tree.  Requires _build/release-date.txt to exist

  local root=_release/VERSION
  mkdir -p $root/{doc,test,pub}

  tour

  # Metadata
  cp -v _build/release-date.txt oil-version.txt $root

  # Docs
  # Writes _release/VERSION and _tmp/release-index.html
  all-markdown
  all-ref
  all-redirects  # backward compat

  modify-pages

  # Problem: You can't preview it without .wwz!
  # Maybe have local redirects VERSION/test/wild/ to 
  #
  # Instead of linking, I should compress them all here.

  copy-web

  if command -v tree >/dev/null {
    tree $root
  } else {
    find $root
  }
}

proc soil-run {
  ### CI entry point: stamp the release date, then build the tree.
  build/stamp.sh write-release-date

  run-for-release
}

@ARGV

    (DONE build/doc.sh)
#!/usr/bin/env bash
#
# Usage:
#   build/dev-setup-test.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

# Repository root, computed from this script's location
setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)

proc smoke-test {
  ### For the fast possible development experience

  # To put python2 WEDGE in $PATH
  source build/dev-shell.sh

  bin/osh -c 'echo HI osh python $OILS_VERSION'
  bin/ysh -c 'echo HI ysh python $OILS_VERSION'

  ninja

  _bin/cxx-asan/osh -c 'echo HI osh C++ $OILS_VERSION'
  _bin/cxx-asan/ysh -c 'echo HI ysh C++ $OILS_VERSION'
}

@ARGV
    (DONE build/dev-setup-test.sh)
# Usage:
#   source build/common.sh

# Include guard.
# Quoting fixes the classic pitfall: unquoted, an empty expansion can leave
# 'test -n' with no operand, which evaluates to true and returns too early.
test -n "${__BUILD_COMMON_SH:-}" && return
readonly __BUILD_COMMON_SH=1

# Fail fast if the caller didn't set REPO_ROOT before sourcing this file.
if test -z ${REPO_ROOT:-} {
  echo 'build/common.sh: $REPO_ROOT should be set before sourcing'
  exit 1
}

set -o nounset
set -o errexit
#eval 'set -o pipefail'

# New version is slightly slower -- 13 seconds vs. 11.6 seconds on oils-for-unix
readonly CLANG_DIR_RELATIVE='../oil_DEPS/clang+llvm-14.0.0-x86_64-linux-gnu-ubuntu-18.04'

# Prefer the clang under $REPO_ROOT; fall back to a hard-coded path.
setvar CLANG_DIR_1 = "$REPO_ROOT/$CLANG_DIR_RELATIVE"
setvar CLANG_DIR_FALLBACK = "~/git/oilshell/oil/$CLANG_DIR_RELATIVE"
if test -d $CLANG_DIR_1 {
  setvar CLANG_DIR = "$CLANG_DIR_1"
  setvar CLANG_IS_MISSING = ''''
} else {
  # BUG FIX: What if we're building _deps/ovm-build or ../benchmark-data/src?
  # Just hard-code an absolute path.  (We used to use $PWD, but I think that
  # was too fragile.)
  setvar CLANG_DIR = "$CLANG_DIR_FALLBACK"
  setvar CLANG_IS_MISSING = ''T''
}
readonly CLANG_DIR

readonly CLANG=$CLANG_DIR/bin/clang  # used by benchmarks/{id,ovm-build}.sh
readonly CLANGXX=$CLANG_DIR/bin/clang++

# I'm not sure if there's a GCC version of this?
export ASAN_SYMBOLIZER_PATH=$CLANG_DIR_RELATIVE/bin/llvm-symbolizer

# ThreadSanitizer doesn't always give us all locations, but this doesn't help
# export TSAN_SYMBOLIZER_PATH=$ASAN_SYMBOLIZER_PATH

# equivalent of 'cc' for C++ language
# https://stackoverflow.com/questions/172587/what-is-the-difference-between-g-and-gcc
setvar CXX = ${CXX:-'c++'}

# Compiler flags we want everywhere.
# - -Weverything is more than -Wall, but too many errors now.
# - -fno-omit-frame-pointer is what Brendan Gregg says should always be on.
#   Omitting the frame pointer might be neglibly faster, but reduces
#   observability.  It's required for the 'perf' tool and other kinds of tracing.
#   Anecdotally the speed difference was in the noise on parsing
#   configure-coreutils.  
# - TODO(6/22): Disabled invalid-offsetof for now, but we should enable it after
#   progress on the garbage collector.  It could catch bugs.

# Allow user to override both BASE_CXXFLAGS and CXXFLAGS
# There doesn't seem to be a well-known convention for this.  Similar to this
# question:
# - https://stackoverflow.com/questions/51606653/allowing-users-to-override-cflags-cxxflags-and-friends

setvar default_cxx_flags = ''-std=c++11 -Wall -Wno-invalid-offsetof -fno-omit-frame-pointer''

# note: Use - and not :- so that BASE_CXXFLAGS= works
setvar BASE_CXXFLAGS = ${BASE_CXXFLAGS-$default_cxx_flags}

readonly PY27=Python-2.7.13

# Location of the fully configured CPython build
readonly PREPARE_DIR=$REPO_ROOT/../oil_DEPS/cpython-full

proc log {
  ### Print a message to stderr.
  echo @ARGV >&2
}

proc die {
  ### Log a fatal error and exit with status 1.
  log "$0: FATAL: $[join(ARGV)]"
  exit 1
}

proc can-compile-32-bit {
  ### Succeeds if the system c++ can target 32-bit.
  # Try compiling a basic file
  c++ -m32 -o /dev/null build/detect-cc.c
}
    (DONE build/common.sh)
#!/usr/bin/env bash
#
# Usage:
#   benchmarks/gperftools.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

# Hm these appear to be ancient versions, google-pprof --version says 2.0, but
# we're on 2.7
#
# https://github.com/gperftools/gperftools/releases

proc uninstall {
  ### Remove the distro gperftools packages.
  sudo apt remove google-perftools libgoogle-perftools-dev
}

# /usr/local/bin/pprof also seems to have the 2.0 version number!
proc download {
  ### Fetch the gperftools 2.7 source tarball into _deps.
  wget --directory _deps \
    'https://github.com/gperftools/gperftools/releases/download/gperftools-2.7/gperftools-2.7.tar.gz'
}

# Binary under test, built with tcmalloc so heap profiling works.
# (Removed a trailing space left by a lossy translation.)
readonly OILS_CPP='_bin/oils-for-unix.tcmalloc'

proc collect-small {
  ### Collect heap profiles for small parse/eval workloads.
  # HEAPPROFILE tells tcmalloc where to write heap profiles.  The original
  # body had stray quotes from a lossy translation that put the env binding
  # inside string literals; use an explicit 'env' binding.
  env HEAPPROFILE=_tmp/small-parse.hprof $OILS_CPP -c 'echo hi'

  echo 'echo hi' > _tmp/h.sh
  env HEAPPROFILE=_tmp/small-eval.hprof $OILS_CPP -n _tmp/h.sh
}

proc collect-big {
  ### Collect heap profiles for a big parse and a long-running eval.
  #local path=benchmarks/testdata/configure
  local path=${1:-configure}

  env HEAPPROFILE=_tmp/big-parse.hprof $OILS_CPP --ast-format none -n $path

  # Run 200 iterations of fib(44).  Got about 18 MB of heap usage.
  # (This matches the 200 iterations in benchmarks/compute.sh, which shows 60
  # MB max RSS)
  env HEAPPROFILE=_tmp/big-eval.hprof $OILS_CPP benchmarks/compute/fib.sh 200 44
}

# e.g. pass _tmp/osh_parse.hprof.0001.heap
proc browse {
  ### Open it in a browser
  pprof --web $OILS_CPP @ARGV
}

proc svg {
  ### Render a heap profile ($1, a .heap file) to SVG next to it.
  local in=$1
  local out=${in%.heap}.svg
  pprof --svg $OILS_CPP @ARGV > $out

  echo "Wrote $out"
}

@ARGV
    (DONE benchmarks/gperftools.sh)
#!/usr/bin/env bash
#
# A pure string-processing benchmark extracted from bash-completion.
#
# Note: most stuff moved to benchmarks/compute.
#
# Usage:
#   benchmarks/parse-help.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)
source build/common.sh

readonly DATA_DIR='benchmarks/parse-help'
readonly EXCERPT=benchmarks/parse-help/excerpt.sh

proc collect {
  ### Save --help output from a couple of tools as test data.
  mkdir -p $DATA_DIR

  ls --help > $DATA_DIR/ls.txt
  ~/.local/bin/mypy --help > $DATA_DIR/mypy.txt

  wc -l $DATA_DIR/*
}

proc collect-clang {
  ### Save clang's --help output too ($CLANGXX comes from build/common.sh).
  $CLANGXX --help > $DATA_DIR/clang.txt
}

proc shorten {
  ### Make a 2-line excerpt of ls --help for quick benchmark runs.
  egrep '^[ ]+-' $DATA_DIR/ls.txt | head -n 2 | tee $DATA_DIR/ls-short.txt
}

setvar TIMEFORMAT = ''%U''

# Geez:
#        ls     mypy
# bash   25ms   25ms
# OSH   600ms  900ms   There is a lot of variance here too.

# Well I guess that is 25x slower?  It's a computationally expensive thing.
# Oh part of this is because printf is not a builtin!  Doh.
#
# TODO
# - count the number of printf invocations.  But you have to do it recursively!
# - Turn this into a proper benchmark with an HTML page.

proc one {
  ### Time _parse_help under bin/osh with xtrace enabled.
  local sh='bin/osh'
  local cmd='ls-short'
  export PS4='+[${LINENO}:${FUNCNAME[0]}] '
  time cat $DATA_DIR/$cmd.txt | $sh -x $EXCERPT _parse_help -
}

proc compare-one {
  ### Time _parse_help under bin/osh vs. bash on the same input.
  local cmd='ls-short'
  time cat $DATA_DIR/$cmd.txt | bin/osh $EXCERPT _parse_help -
  echo ---
  time cat $DATA_DIR/$cmd.txt | bash $EXCERPT _parse_help -
}

@ARGV
    (DONE benchmarks/parse-help.sh)
#!/usr/bin/env bash
#
# Usage:
#   benchmarks/id-test.sh <function name>

source benchmarks/id.sh
source test/common.sh

set -o nounset
set -o pipefail
set -o errexit

proc test-shell-prov {
  ### Exercise shell-provenance-2 (defined in benchmarks/id.sh).
  shell-provenance-2 no-host 2022-12-29 _tmp/ \
    bin/osh
}

proc test-out-param {
  ### Check that out-param writes through to the caller's variable.
  local mylocal

  out-param mylocal
  assert $mylocal = 'returned'

  echo "mylocal=$mylocal"
}

proc test-compiler-id {
  ### Dump compiler IDs for gcc, and clang if its binary exists.
  dump-compiler-id $(which gcc)

  if test -f $CLANG {
    dump-compiler-id $CLANG
  }

  head _tmp/compiler-id/*/version.txt
}

proc soil-run {
  ### CI entry point: run all test-* functions in this file.
  run-test-funcs
}

@ARGV
    (DONE benchmarks/id-test.sh)
#!/usr/bin/env bash
#
# Usage:
#   benchmarks/time-test.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)

source test/common.sh
source test/tsv-lib.sh

# TODO: This would be a nice little program for Oil
proc count-lines-and-cols {
  ### Assert stdin has $1 lines and $2 columns (separator $3, default tab),
  ### echoing the input through.  Fails if column counts are inconsistent.
  python2 -c '
import sys

expected_num_lines = int(sys.argv[1])
expected_num_cols = int(sys.argv[2])
try:
  sep = sys.argv[3]
except IndexError:
  sep = "\t"

num_lines = 0
tab_counts = []
for line in sys.stdin:
  tab_counts.append(line.count(sep))
  num_lines += 1
  # Show what we get
  sys.stdout.write(line)

if any(tab_counts[0] != n for n in tab_counts):
  raise AssertionError(tab_counts)

num_tabs = tab_counts[0]

assert expected_num_lines == num_lines, \
  "expected %d lines, got %d" % (expected_num_lines, num_lines)
assert expected_num_cols == num_tabs + 1, \
  "expected %d cols, got %d" % (expected_num_cols, num_tabs + 1)
' @ARGV
}

proc time-tool {
  ### Run time_.py from the same directory as this script.
  $(dirname $0)/time_.py @ARGV
}

proc test-csv {
  ### Check CSV column counts for various flag combinations.
  local out=_tmp/time.csv

  time-tool -o $out -- echo hi
  cat $out | count-lines-and-cols 1 2 ,

  time-tool -o $out --field a --field b -- echo hi
  cat $out | count-lines-and-cols 1 4 ,

  time-tool -o $out --rusage -- echo hi
  cat $out | count-lines-and-cols 1 5 ,

  time-tool -o $out --rusage --field a --field b -- echo hi
  cat $out | count-lines-and-cols 1 7 ,
}

proc test-tsv {
  ### Check TSV column counts for various flag combinations.
  local out=_tmp/time.tsv
  rm -f $out

  for i in 1 2 3 {
    time-tool --tsv -o $out --append --time-fmt '%.2f' -- sleep 0.0${i}
  }
  cat $out | count-lines-and-cols 3 2

  time-tool --tsv -o $out --field a --field b -- echo hi
  cat $out | count-lines-and-cols 1 4 

  time-tool --tsv -o $out --rusage --field a --field b -- echo hi
  cat $out | count-lines-and-cols 1 7
}

proc test-append {
  ### Without --append the file is overwritten; with it, rows accumulate.
  local out=_tmp/overwrite.tsv
  for i in 4 5 {
    time-tool --tsv -o $out -- sleep 0.0${i}
  }
  cat $out | count-lines-and-cols 1 2

  echo ---

  local out=_tmp/append.tsv
  rm -f $out

  for i in 4 5 {
    time-tool --tsv -o $out --append -- sleep 0.0${i}
  }
  cat $out | count-lines-and-cols 2 2
}

proc test-usage {
  ### Exit codes for missing or invalid arguments.
  # no args
  set +o errexit

  time-tool; setvar status = ""$?
  assert $status -eq 2

  time-tool --output; setvar status = ""$?
  assert $status -eq 2

  time-tool sleep 0.1
  time-tool --append sleep 0.1; setvar status = ""$?
  assert $status -eq 0

  set -o errexit
}

proc test-bad-tsv-chars {
  ### Field values that would corrupt TSV output must be rejected.
  local out=_tmp/time2.tsv
  rm -f $out

  set +o errexit

  # Newline should fail
  time-tool --tsv -o $out --field $'\n' -- sleep 0.001; setvar status = ""$?
  assert $status -eq 1

  # Tab should fail
  time-tool --tsv -o $out --field $'\t' -- sleep 0.001; setvar status = ""$?
  assert $status -eq 1

  # Quote should fail
  time-tool --tsv -o $out --field '"' -- sleep 0.001; setvar status = ""$?
  assert $status -eq 1

  # Backslash is OK
  time-tool --tsv -o $out --field '\' -- sleep 0.001; setvar status = ""$?
  assert $status -eq 0

  # Space is OK, although canonical form would be " "
  time-tool --tsv -o $out --field ' ' -- sleep 0.001; setvar status = ""$?
  assert $status -eq 0

  set -o errexit

  cat $out

  echo $'OK\ttest-bad-tsv-chars'
}

proc test-stdout {
  ### --stdout captures the child's stdout to a file.
  local out=_tmp/time-stdout.csv
  time-tool -o $out --stdout _tmp/stdout.txt -- seq 3

  diff _tmp/stdout.txt - <<< """
1
2
3
"""

  # No assertions here yet
  md5sum _tmp/stdout.txt
  cat $out | count-lines-and-cols 1 3 ,

  time-tool -o $out --rusage --stdout _tmp/stdout.txt -- seq 3
  cat $out | count-lines-and-cols 1 6 ,
}

proc test-rusage {
  ### --rusage adds resource-usage columns.
  # No assertions here yet

  local out=_tmp/time-usage.csv
  time-tool --tsv -o $out --rusage -- bash -c 'echo bash'
  cat $out | count-lines-and-cols 1 5

  #time-tool --tsv -o $out --rusage -- dash -c 'echo dash'
  #cat $out

  # Blow up memory size for testing
  local py='a=[42]*500000; print "python"'

  time-tool --tsv -o $out --rusage -- python2 -c $py
  cat $out | count-lines-and-cols 1 5

  #time-tool --tsv -o $out --rusage -- bin/osh -c 'echo osh'
  #cat $out
}

# Compare vs. /usr/bin/time.
proc test-maxrss {
  ### Compare our max RSS measurement against /usr/bin/time.
  if which time {  # Ignore this on continuous build
    command time --format '%x %U %M' -- seq 1
  }

  # Showing a discrepancy.  FIXED!
  time-tool -o _tmp/maxrss --tsv --rusage -- seq 1
  cat _tmp/maxrss
}

proc test-print-header {
  ### --print-header writes a header row and accepts no positional args.
  set +o errexit

  # no arguments allowed
  time-tool --tsv --print-header foo bar
  assert $? -eq 2

  time-tool --tsv --print-header --field name
  assert $? -eq 0

  time-tool --tsv --print-header --rusage --field name
  assert $? -eq 0

  time-tool --print-header --rusage --field foo --field bar
  assert $? -eq 0

  time-tool -o _tmp/time-test-1 \
    --print-header --rusage --stdout DUMMY --tsv --field a --field b
  assert $? -eq 0

  #set -x
  head _tmp/time-test-1
}

proc test-time-helper {
  ### Exercise the _devbuild/bin/time-helper binary directly.
  set +o errexit

  local tmp=_tmp/time-helper.txt

  local th=_devbuild/bin/time-helper

  # Make some work show up
  local cmd='{ md5sum */*.md; sleep 0.15; exit 42; } > /dev/null'

  echo 'will be overwritten' > $tmp
  cat $tmp

  $th
  assert $? -ne 0  # it's 1, but could be 2

  $th /bad
  assert $? -eq 1

  $th -o $tmp -d $'\t' -x -e -- sh -c $cmd
  assert $? -eq 42
  cat $tmp
  echo

  # Now append
  $th -o $tmp -a -d , -x -e -U -S -M -- sh -c $cmd
  assert $? -eq 42
  cat $tmp
  echo
  
  # Error case
  $th -z
  assert $? -eq 2
}

proc test-time-tsv {
  ### A failing command still produces a row; exit status is 1.
  local status

  local out=_tmp/time-test-zz
  rm -f -v $out

  # Similar to what soil/worker.sh does
  set +o errexit
  time-tsv -o $out --append -- zz
  setvar status = ""$?
  set -o errexit

  echo status=$status
  assert $status -eq 1

  cat $out
  echo
}

proc test-grandchild-memory {
  ### RUSAGE_CHILDREN should account for grandchildren's memory too.
  local -a use_mem=( python2 -c 'import sys; ["X" * int(sys.argv[1])]' 10000000 )

  time-tsv -o /dev/stdout --rusage -- ${use_mem[@]}

  # RUSAGE_CHILDREN includes grandchildren!
  time-tsv -o /dev/stdout --rusage -- sh -c 'echo; "$@"' dummy ${use_mem[@]}

  # 'exec' doesn't make a consistent difference, because /bin/sh doesn't use
  # much memory
  time-tsv -o /dev/stdout --rusage -- sh -c 'echo; exec "$@"' dummy ${use_mem[@]}
}

proc soil-run {
  ### CI entry point: run all test-* functions in this file.
  run-test-funcs
}

@ARGV
    (DONE benchmarks/time-test.sh)
#!/usr/bin/env bash
#
# Do a quick test of virtual memory.
#
# Note: This is probably very similar to max RSS of
# testdata/osh-runtime/hello-world.sh, so it could be retired.
#
# Usage:
#   benchmarks/vm-baseline.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

source test/common.sh  # log
source benchmarks/common.sh

readonly BASE_DIR=_tmp/vm-baseline

proc measure {
  ### Record /proc/$$/status for each shell listed in the provenance file.
  local provenance=$1
  local host_job_id=$2
  local base_dir=${3:-_tmp/vm-baseline}

  local out_dir="$base_dir/$host_job_id"
  mkdir -p $out_dir

  # TODO:
  # print-tasks should:
  # - use the whole shell path like _bin/osh
  # - the host name should be a column
  # - the join ID can be a file, and construct the task name from that
  # - Then maybe use tsv_columns_from_files.py like we do with cachegrind

  # - should not
  #   - get shell name from the filename
  #   - get host name from the filename
  # - should use TSV files

  # Fourth column is the shell.
  cat $provenance | filter-provenance ${SHELLS[@]} $OSH_CPP_REGEX |
  while read _ _ _ sh_path shell_hash {

    local sh_name
    setvar sh_name = $(basename $sh_path)

    local out="$out_dir/${sh_name}-${shell_hash}.txt"

    # There is a race condition on the status but sleep helps.
    # Bug fix: ALIVE to prevent exec optimization in OSH and zsh.
    $sh_path -c 'sleep 0.001; cat /proc/$$/status; echo ALIVE' > $out
  }

  echo
  echo "$out_dir:"
  ls -l $out_dir
}

# Run a single file through stage 1 and report.
proc demo {
  ### Generate the stage1 CSV from the newest lisa.2017-* job dir.
  local -a job_dirs=($BASE_DIR/lisa.2017-*)
  local dir1=$BASE_DIR/stage1
  local dir2=$BASE_DIR/stage2

  mkdir -p $dir1 $dir2
  
  benchmarks/virtual_memory.py baseline ${job_dirs[-1]} \
    > $dir1/vm-baseline.csv

  benchmarks/report.R vm-baseline $dir1 $dir2
}

# Combine CSV files.
proc stage1 {
  ### Combine raw measurements into $BASE_DIR/stage1/vm-baseline.csv.
  local raw_dir=${1:-$BASE_DIR/raw}
  local single_machine=${2:-}

  local out=$BASE_DIR/stage1
  mkdir -p $out

  local base_dir=

  local -a raw=()

  # With $single_machine, use only the newest local job dir; otherwise
  # combine the newest job dir from each benchmark machine.
  if test -n $single_machine {
    setvar base_dir = '_tmp/vm-baseline'
    local -a m1=( $base_dir/$single_machine.* )
    setvar raw = ''( ${m1[-1]} )
  } else {
    setvar base_dir = '../benchmark-data/vm-baseline'
    # Globs are in lexicographical order, which works for our dates.
    local -a m1=( $base_dir/$MACHINE1.* )
    local -a m2=( $base_dir/$MACHINE2.* )

    setvar raw = ''( ${m1[-1]} ${m2[-1]} )
  }

  benchmarks/virtual_memory.py baseline ${raw[@]} \
    | tee $out/vm-baseline.csv
}

proc print-report {
  ### Render the HTML report body from the stage1 CSV files.
  local in_dir=$1

  benchmark-html-head 'Virtual Memory Baseline'

  cat <<< """
  <body class="width60">
    <p id="home-link">
      <a href="/">oilshell.org</a>
    </p>
"""

  cmark <<< '''
## Virtual Memory Baseline

Source code: [oil/benchmarks/vm-baseline.sh](https://github.com/oilshell/oil/tree/master/benchmarks/vm-baseline.sh)

### Memory Used at Startup (MB)

Memory usage is measured in MB (powers of 10), not MiB (powers of 2).

'''
  csv2html $in_dir/vm-baseline.csv

  # R code doesn't generate this
  if false {
    cmark <<< '### Shell and Host Details'

    csv2html $in_dir/shells.csv
    csv2html $in_dir/hosts.csv
  }

  cat <<< """
  </body>
</html>
"""
}


#
# Other
#

proc soil-run {
  ### Run it on just this machine, and make a report

  rm -r -f $BASE_DIR
  mkdir -p $BASE_DIR

  # Build the C++ binary under test
  local -a oil_bin=( $OSH_CPP_NINJA_BUILD )
  ninja ${oil_bin[@]}

  local single_machine='no-host'

  local job_id
  setvar job_id = $(benchmarks/id.sh print-job-id)

  benchmarks/id.sh shell-provenance-2 \
    $single_machine $job_id _tmp \
    bash dash bin/osh ${oil_bin[@]}

  # TODO: measure* should use print-tasks | run-tasks
  local provenance=_tmp/provenance.txt
  local host_job_id="$single_machine.$job_id"

  measure $provenance $host_job_id

  # Make it run on one machine
  stage1 '' $single_machine 

  benchmarks/report.sh stage2 $BASE_DIR
  benchmarks/report.sh stage3 $BASE_DIR
}

@ARGV
    (DONE benchmarks/vm-baseline.sh)
#!/usr/bin/env bash
#
# A string processing test case copied from bash_completion.

shopt -s extglob  # needed for Oil, but not bash

# This function shell-quotes the argument
proc quote {
    ### Wrap $1 in single quotes, escaping embedded single quotes.
    local quoted=${1//\'/\'\\\'\'}
    printf "'%s'" $quoted
}

# This function shell-dequotes the argument
proc dequote {
    ### Undo shell quoting via eval; errors are silenced.
    eval printf %s $1 2> /dev/null
}

# Helper function for _parse_help and _parse_usage.
proc __parse_options {
    ### Extract a normalized option name (e.g. --foo=) from one help line.
    local option option2 i IFS=$' \t\n,/|'

    # Take first found long option, or first one (short) if not found.
    setvar option = ''
    local -a array
    read -a array <<<"$1"
    for i in "${array[@]}" {
        case (i) {
            ---* { break }
            --?* { setvar option = "$i" ; break }
            -?* {  [[ $option ]] || setvar option = "$i" }
            * {    break }
        }
    }
    [[ $option ]] || return

    # NOTE(review): this line looks garbled by the bash->ysh translation;
    # the upstream bash source sets IFS=$' \t\n' here.  Verify before
    # relying on its behavior.
    setvar IFS = '$' \t\n'' # affects parsing of the regexps below...

    # Expand --[no]foo to --foo and --nofoo etc
    if [[ $option =~ (\[((no|dont)-?)\]). ]] {
        setvar option2 = ${option/"${BASH_REMATCH[1]}"/}
        setvar option2 = ${option2%%[<{().[]*}
        printf '%s\n' ${option2/=*/=}
        setvar option = ${option/"${BASH_REMATCH[1]}"/"${BASH_REMATCH[2]}"}
    }

    setvar option = ${option%%[<{().[]*}
    printf '%s\n' ${option/=*/=}
}

# Parse GNU style help output of the given command.
# @param $1  command; if "-", read from stdin and ignore rest of args
# @param $2  command options (default: --help)
#
proc _parse_help {
    ### Feed help output (stdin, or from running $1) to __parse_options.
    # NOTE(review): this body contains stray quotes and an empty brace block
    # from a lossy bash->ysh translation -- the LC_ALL binding sits inside a
    # string literal, and BASH_REMATCH is referenced without the preceding
    # regex match the upstream bash_completion source has.  Compare against
    # upstream before trusting its behavior.
    eval local cmd=$( quote "$1" )
    local line
    do { case (cmd) {
        - { cat }
        * {' LC_ALL=C' $( dequote "$cmd" ) ${2:---help} 2>&1 }
      } } \
    | while read -r line { {
            setvar line = ${line/"${BASH_REMATCH[0]}"/"${BASH_REMATCH[1]}"}
        }
        __parse_options ${line// or /, }

    }
}

@ARGV

    (DONE benchmarks/parse-help/excerpt.sh)
#!/usr/bin/env bash
#
# A string processing test case copied from bash_completion.

shopt -s extglob  # needed for Oil, but not bash

# This function shell-quotes the argument
proc quote {
    ### Shell-quote $1: wrap in single quotes, escaping embedded single quotes.
    # ${1//\'/\'\\\'\'} replaces each ' with '\'' (close quote, escaped quote, reopen).
    local quoted=${1//\'/\'\\\'\'}
    printf "'%s'" $quoted
}

# This function shell-dequotes the argument
proc dequote {
    ### Shell-dequote $1 by eval'ing it through printf; errors are discarded.
    # NOTE(review): eval on arbitrary input is unsafe; copied as-is from bash_completion.
    eval printf %s $1 2> /dev/null
}

# Helper function for _parse_help and _parse_usage.
proc __parse_options {
    ### Print option names (one per line) parsed from the option spec in $1.
    local option option2 i IFS=$' \t\n,/|'

    # Take first found long option, or first one (short) if not found.
    setvar option = ''
    local -a array=( $1 )  # relies on word splitting
    for i in "${array[@]}" {
        case (i) {
            ---* { break }
            --?* { setvar option = "$i" ; break }
            -?* {  [[ $option ]] || setvar option = "$i" }
            * {    break }
        }
    }
    [[ $option ]] || return

    # NOTE(review): looks like a translation artifact of IFS=$' \t\n' -- the
    # quoting below doesn't appear to assign the intended value; confirm
    # against the original bash_completion source.
    setvar IFS = '$' \t\n'' # affects parsing of the regexps below...

    # Expand --[no]foo to --foo and --nofoo etc
    if [[ $option =~ (\[((no|dont)-?)\]). ]] {
        setvar option2 = ${option/"${BASH_REMATCH[1]}"/}
        setvar option2 = ${option2%%[<{().[]*}
        printf '%s\n' ${option2/=*/=}
        setvar option = ${option/"${BASH_REMATCH[1]}"/"${BASH_REMATCH[2]}"}
    }

    # Strip trailing <, {, (, ., [ suffixes and normalize --opt=VAL to --opt=.
    setvar option = ${option%%[<{().[]*}
    printf '%s\n' ${option/=*/=}
}

# Parse GNU style help output of the given command.
# @param $1  command; if "-", read from stdin and ignore rest of args
# @param $2  command options (default: --help)
#
proc _parse_help {
    ### Pure variant: parse --help text from STDIN into option names.
    # NOTE(review): unlike the full bash_completion version, this copy ignores
    # its arguments and always reads stdin; `line` is not declared local here.
    while read -r line { {
            setvar line = ${line/"${BASH_REMATCH[0]}"/"${BASH_REMATCH[1]}"}
        }
        __parse_options ${line// or /, }

    }
}

# My addition
proc parse_help_file {
  ### Run _parse_help with file $1 on stdin.
  _parse_help - < "$1"
}

@ARGV

    (DONE benchmarks/parse-help/pure-excerpt.sh)
#!/usr/bin/env bash
#
# Summary: PyPy is slower than CPython for parsing.  (I bet it also uses more
# memory, although I didn't measure that.)
#
# I don't plan on using PyPy, but this is simple enough to save for posterity.
#
# Usage:
#   ./pypy.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

readonly PYPY=~/install/pypy2-v5.9.0-linux64/bin/pypy

readonly ABUILD=~/git/alpine/abuild/abuild 

proc parse-abuild {
  ### Time parsing the abuild script with the given interpreter ($1 = vm binary).
  local vm=$1
  local out=_tmp/pypy
  mkdir -p $out

  # -n = parse only; proc status is dumped so resource usage can be inspected.
  time $vm bin/oil.py osh \
    --dump-proc-status-to $out/proc-status.txt \
    -n $ABUILD >/dev/null
}

# ~3.5 seconds
proc parse-with-cpython {
  ### Parse abuild under CPython (~3.5 seconds per the comment above).
  parse-abuild python
}

# ~4.8 seconds
# NOTE: We could run it in a loop to see if the JIT warms up, but that would
# only be for curiosity.  Most shell processes are short-lived, so it's the
# wrong thing to optimize for.
proc parse-with-pypy {
  ### Parse abuild under PyPy (~4.8 seconds per the comment above).
  parse-abuild $PYPY
}

@ARGV
    (DONE benchmarks/pypy.sh)
#!/usr/bin/env bash
#
# Measure the time it takes to build a binary with different compilers on
# different machines, and measure the binary size.
#
# Usage:
#   ./ovm-build.sh <function name>
#
# Run on its own:
#   1. Follow common instructions in benchmarks/osh-parser.sh
#   2. benchmarks/auto.sh measure-builds
#   3. benchmarks/report.sh ovm-build

# Directories used:
#
# oilshell.org/blob/
#  ovm-build/
#
# ~/git/oilshell/
#   oil/
#     _deps/
#       ovm-build  # tarballs and extracted source
#     _tmp/
#       ovm-build/  
#         raw/     # output CSV
#         stage1
#   benchmark-data/
#     ovm-build/
#       raw/
#     compiler-id/
#     host-id/

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd $(dirname $0)/..; pwd)
readonly REPO_ROOT

source test/tsv-lib.sh  # uses REPO_ROOT
source benchmarks/common.sh  # for log, etc.
source build/common.sh  # for $CLANG

readonly BASE_DIR=_tmp/ovm-build
readonly TAR_DIR=$PWD/_deps/ovm-build  # Make it absolute

#
# Dependencies
#

# Leave out mksh for now, because it doesn't follow ./configure make.  It just
# has Build.sh.
readonly -a TAR_SUBDIRS=( bash-4.4 dash-0.5.9.1 )  # mksh )

# NOTE: Same list in oilshell.org/blob/run.sh.
proc tarballs {
  ### Print the tarball filenames to download, one per line.
  cat <<< """
bash-4.4.tar.gz
dash-0.5.9.1.tar.gz
mksh-R56c.tgz
"""
}

proc download {
  ### Fetch each tarball into $TAR_DIR; --no-clobber skips files already present.
  mkdir -p $TAR_DIR
  tarballs | xargs -n 1 -I {} --verbose -- \
    wget --no-clobber --directory $TAR_DIR 'https://www.oilshell.org/blob/ovm-build/{}'
}

# Done MANUALLY.
proc extract-other {
  ### Extract every downloaded *gz tarball under $TAR_DIR (timed).
  time for f in $TAR_DIR/*gz {
    tar -x --directory $TAR_DIR --file $f 
  }
}

# Done automatically by 'measure' function.
#
# NOTE: We assume that _release/oil.tar exists.  It should be made by
# scripts/release.sh build-and-test or benchmark-build.
proc extract-oil {
  ### Refresh the extracted oil / oils-for-unix source trees under $TAR_DIR.
  # This is different than the others tarballs.
  rm -r -f -v $TAR_DIR/oil-*
  tar -x --directory $TAR_DIR --file _release/oil.tar

  # To run on multiple machines, use the one in the benchmarks-data repo.
  cp --recursive --no-target-directory \
    ../benchmark-data/src/oils-for-unix-$OIL_VERSION/ \
    $TAR_DIR/oils-for-unix-$OIL_VERSION/
}

#
# Measure Size of Binaries.
#

# Other tools:
# - bloaty to look inside elf file
# - nm?  Just a flat list of symbols?  Counting them would be nice.
# - zipfile.py to look inside bytecode.zip

proc sizes-tsv {
  ### Emit a TSV (host_label, num_bytes, path) with one row per path in argv.
  # host_label matches the times.tsv file output by report.R
  tsv-row host_label num_bytes path
  local host=$(hostname)
  # -maxdepth 0 means: stat exactly the paths given, don't recurse.
  find @ARGV -maxdepth 0 -printf "$host\t%s\t%p\n"
}

# NOTE: This should be the same on all x64 machines.  But I want to run it on
# x64 machines.
proc measure-sizes {
  ### Record binary/bytecode sizes into ${prefix}.*.tsv files ($1 = prefix).
  local prefix=${1:-$BASE_DIR/raw/demo}

  # PROBLEM: Do I need provenance for gcc/clang here?  I can just join it later
  # in R.

  # clang/oils-for-unix
  # clang/oils-for-unix.stripped
  # gcc/oils-for-unix
  # gcc/oils-for-unix.stripped
  sizes-tsv $BASE_DIR/bin/*/{oils-for-unix,oils-for-unix.stripped} \
    > ${prefix}.native-sizes.tsv

  sizes-tsv $TAR_DIR/oil-$OIL_VERSION/_build/oil/bytecode-opy.zip \
    > ${prefix}.bytecode-size.tsv

  sizes-tsv $BASE_DIR/bin/*/oil.* \
    > ${prefix}.bin-sizes.tsv

  sizes-tsv $BASE_DIR/bin/*/*sh \
    > ${prefix}.other-shell-sizes.tsv

  log "Wrote ${prefix}.*.tsv"
}

#
# Unused Demos
#

proc bytecode-size {
  ### Show the bytecode.zip entry count and on-disk size (unused demo).
  local zip=_build/oil/bytecode.zip

  # 242 files, 1.85 MB
  unzip -l $zip | tail -n 1 

  # 1.88 MB, so there's 30K of header overhead.
  ls -l $zip
}

# 6.8 seconds for debug build, instead of 8 seconds.
proc clang-oil-dbg {
  ### Clean, then build the debug OVM with Clang.
  # NOTE(review): the stray quotes look like a translation artifact of the
  # env-prefix form `CC=$CLANG make _build/oil/ovm-dbg`; confirm tokenization.
  make clean"
  CC=$CLANG" make _build/oil/ovm-dbg
}

#
# Measure Elapsed Time
#

# Add --target-size?  Add that functionality to benchmarks/time.py?
#
# Should we add explicit targets?
#   - ovm-clang, ovm-clang-dbg
#   - ovm-gcc, ovm-gcc-dbg
#
# It would be possible, but it complicates the makefile.

proc build-task {
  ### Build one target and append a timing row to $host.$job_id.times.tsv.
  # Args: out_dir job_id host host_hash compiler_path compiler_hash src_dir action
  local out_dir=$1
  local job_id=$2
  local host=$3
  local host_hash=$4
  local compiler_path=$5
  local compiler_hash=$6
  local src_dir=$7
  local action=$8

  local times_out="$PWD/$out_dir/$host.$job_id.times.tsv"

  # Definitions that depends on $PWD.
  local -a TIME_PREFIX=(
    time-tsv \
    --append \
    --output $times_out \
    --field "$host" --field "$host_hash" \
    --field "$compiler_path" --field "$compiler_hash" \
    --field "$src_dir" --field "$action"
  )
  local bin_base_dir=$PWD/$BASE_DIR/bin

  # One bin dir per compiler, e.g. .../bin/gcc/
  local bin_dir="$bin_base_dir/$(basename $compiler_path)"
  mkdir -p $bin_dir

  pushd $src_dir >/dev/null

  # NOTE: We're not saving the output anywhere.  We save the status, which
  # protects against basic errors.

  case (action) {
    (configure {
      ${TIME_PREFIX[@]} -- ./configure

      # Cleaning here relies on the ORDER of tasks.txt.  configure happens
      # before build.  The Clang build shouldn't reuse GCC objects!
      # It has to be done after configure, because the Makefile must exist!
      make clean
      }

    (make {
      ${TIME_PREFIX[@]} -- make CC=$compiler_path

      local target
      case (src_dir) {
        (*/bash* {
          setvar target = 'bash'
          }
        (*/dash* {
          setvar target = 'src/dash'
          }
      }

      strip $target
      cp -v $target $bin_dir
      }

    (oils-for-unix* {
      case (action) {
        (oils-for-unix {
          local variant='dbg'
          }
        (oils-for-unix.stripped {
          local variant='opt'
          }
        * {
          die "Invalid target"
          }
      }

      # Change the C compiler into the corresponding C++ compiler
      local compiler
      case (compiler_path) { 
        (*gcc {
          # note: we take provenance of /usr/bin/gcc, but the shell script runs 'c++'
          setvar compiler = ''cxx''
          }
        (*clang {
          # Note on slight mess: benchmarks/id.sh takes the provenanec of
          # $CLANG.  We translate that to 'clang' here, and
          # _build/oils.sh uses $CLANGXX.
          setvar compiler = ''clang''
          }
        * {
          die "Invalid compiler"
          }
      }

      ${TIME_PREFIX[@]} -- _build/oils.sh $compiler $variant

      # e.g. cp _bin/clang-opt-sh/oils-for-unix.stripped _tmp/ovm-build/bin/clang/
      local filename=$action
      cp -v _bin/$compiler-$variant-sh/$filename $bin_dir
      }

    * {
      local target=$action  # Assume it's a target like _bin/oil.ovm

      ${TIME_PREFIX[@]} -- make CC=$compiler_path $target

      cp -v $target $bin_dir
      }
  }

  popd >/dev/null

  log "DONE BUILD TASK $action $src_dir __ status=$?"
}

proc oil-tasks {
  ### Print oil build-task rows: each provenance line x (configure + 4 targets).
  local provenance=$1

  # NOTE: it MUST be a tarball and not the git repo, because we don't build
  # bytecode-*.zip!  We care about the "packager's experience".
  local oil_dir="$TAR_DIR/oil-$OIL_VERSION"
  local ofu_dir="$TAR_DIR/oils-for-unix-$OIL_VERSION"

  # Add 1 field for each of 5 fields.
  cat $provenance | while read line {
    # NOTE: configure is independent of compiler.
    echo $line $oil_dir configure
    echo $line $oil_dir _bin/oil.ovm
    echo $line $oil_dir _bin/oil.ovm-dbg

    echo $line $ofu_dir oils-for-unix
    echo $line $ofu_dir oils-for-unix.stripped
  }
}

proc other-shell-tasks {
  ### Print bash/dash build-task rows (configure + make), skipping clang rows.
  local provenance=$1

  # NOTE: it MUST be a tarball and not the git repo, because we do the build
  # of bytecode.zip!  We care about the "package experience".
  local tarball='_release/oil.0.5.alpha1.gz'

  # Add 1 field for each of 5 fields.
  cat $provenance | while read line {
    case (line) {
      # Skip clang for now.
      (*clang* {
        continue
        }
    }

    for dir in "${TAR_SUBDIRS[@]}" {
      echo $line $TAR_DIR/$dir configure
      echo $line $TAR_DIR/$dir make
    }
  }
}

# 5 releases: 0.0.0 to 0.4.0.  For now, just do the 0.5.alpha1 release, and
# show the drop.
proc oil-historical-tasks {
  ### Placeholder: historical releases not measured yet (prints an empty line).
  echo 
}

# action is 'configure', a target name, etc.
readonly NUM_COLUMNS=7  # 5 from provenence, then tarball/target

proc measure {
  ### Run all build tasks for one provenance file and record times and sizes.
  local provenance=$1  # from benchmarks/id.sh compiler-provenance
  local out_dir=${2:-$BASE_DIR/raw}

  extract-oil

  # Job ID is everything up to the first dot in the filename.
  local name=$(basename $provenance)
  local prefix=${name%.compiler-provenance.txt}  # strip suffix

  local times_out="$out_dir/$prefix.times.tsv"
  # NOTE: Do we need two raw dirs?
  mkdir -p $BASE_DIR/{raw,stage1,bin} $out_dir

  # TODO: the $times_out calculation is duplicated in build-task()

  # Write header of the TSV file that is appended to.
  tsv-row \
    status elapsed_secs \
    host_name host_hash compiler_path compiler_hash \
    src_dir action > $times_out

  local t1=$BASE_DIR/oil-tasks.txt
  local t2=$BASE_DIR/other-shell-tasks.txt

  oil-tasks $provenance > $t1
  other-shell-tasks $provenance > $t2

  # errexit is suspended so every task runs; failures are reported at the end.
  #grep dash $t2 |
  #time cat $t1 |
  set +o errexit
  time cat $t1 $t2 | xargs --verbose -n $NUM_COLUMNS -- $0 build-task $out_dir 
  local status=$?
  set -o errexit

  if test $status -ne 0 {
    die "*** Some tasks failed. (xargs status=$status) ***"
  }

  measure-sizes $out_dir/$prefix

  cp -v $provenance $out_dir
}

#
# Data Preparation and Analysis
#

proc stage1 {
  ### Concatenate the latest raw TSVs from both machines into $BASE_DIR/stage1.
  local raw_dir=${1:-$BASE_DIR/raw}

  local out=$BASE_DIR/stage1
  mkdir -p $out

  local x
  local -a a b

  # NOTE(review): the '' before each ( below looks like a translation artifact
  # of bash array assignment a=( glob ); kept byte-for-byte.
  # Globs are in lexicographical order, which works for our dates.
  setvar x = "$out/times.tsv"
  setvar a = ''($raw_dir/$MACHINE1.*.times.tsv)
  setvar b = ''($raw_dir/$MACHINE2.*.times.tsv)
  tsv-concat ${a[-1]} ${b[-1]} > $x

  setvar x = "$out/bytecode-size.tsv"
  setvar a = ''($raw_dir/$MACHINE1.*.bytecode-size.tsv)
  setvar b = ''($raw_dir/$MACHINE2.*.bytecode-size.tsv)
  tsv-concat ${a[-1]} ${b[-1]} > $x

  setvar x = "$out/bin-sizes.tsv"
  setvar a = ''($raw_dir/$MACHINE1.*.bin-sizes.tsv)
  setvar b = ''($raw_dir/$MACHINE2.*.bin-sizes.tsv)
  tsv-concat ${a[-1]} ${b[-1]} > $x

  setvar x = "$out/native-sizes.tsv"
  setvar a = ''($raw_dir/$MACHINE1.*.native-sizes.tsv)
  setvar b = ''($raw_dir/$MACHINE2.*.native-sizes.tsv)
  #tsv-concat ${b[-1]} > $x
  tsv-concat ${a[-1]} ${b[-1]} > $x

  # NOTE: unused
  # Construct a one-column TSV file
  local raw_data_tsv=$out/raw-data.tsv
  do { echo 'path'
    echo ${a[-1]}
    echo ${b[-1]}
  } > $raw_data_tsv

  head $out/*
  wc -l $out/*
}

proc print-report {
  ### Render the OVM build-performance HTML report from stage dir $1 to stdout.
  local in_dir=$1
  local base_url='../../web'

  benchmark-html-head 'OVM Build Performance'

  cat <<< """
  <body class="width60">
    <p id="home-link">
      <a href="/">oilshell.org</a>
    </p>
"""

  cmark <<< '''
## OVM Build Performance

Source code: [oil/benchmarks/osh-parser.sh](https://github.com/oilshell/oil/tree/master/benchmarks/osh-parser.sh)

### Time in Seconds by Host and Compiler

We measure the build speed of `bash` and `dash` for comparison.
'''

  # Highlighting clang makes this table easier to read.
  tsv2html \
    --css-class-pattern 'special ^gcc' \
    $in_dir/times.tsv

  cmark <<< '''
### Native Binary Size

'''
  tsv2html --css-class-pattern 'special ^gcc' $in_dir/native-sizes.tsv

  cmark <<< '''
### OVM Binary Size

The oil binary has two portions:

- Architecture-independent `bytecode.zip`
- Architecture- and compiler- dependent native code (`_build/oil/ovm*`)

'''
  # Highlight the "default" production build
  # NOTE(review): measure-sizes writes *-sizes.tsv files, not sizes.tsv --
  # confirm this input actually exists in $in_dir.
  tsv2html --css-class-pattern 'special /gcc/oil.ovm$' $in_dir/sizes.tsv

  cmark <<< '''

### Host and Compiler Details
'''
  tsv2html $in_dir/hosts.tsv
  tsv2html $in_dir/compilers.tsv

  cat <<< """
  </body>
</html>
"""
}

@ARGV
    (DONE benchmarks/ovm-build.sh)
#!/usr/bin/env bash
#
# Test awk vs Python speed.
#
# On this hash table benchmark, Python is maybe 10% slower than gawk.  mawk is
# twice is fast as gawk (and bwk).
#
# Python has much more functionality, so it's not exactly a fair comparison,
# but it's instructive.
#
# Update: simply adding tolower() makes gawk much slower than Python (555 ms
# vs. 280 ms), and mawk is still much faster at 138 ms.
#
# Mawk is known to be fast?  Faster than Java on this benchmark.
# https://brenocon.com/blog/2009/09/dont-mawk-awk-the-fastest-and-most-elegant-big-data-munging-language/
#
# Usage:
#   ./awk-python.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

readonly FILES=(../*.sh ../*/*.sh ../*.py ../*/*.py ../*/*/*.py)

# Test out hash table implementations
# mawk is faster: 77ms vs 155ms for 10 iterations.
proc test-awk {
  ### Compare gawk/mawk/bwk on a line-dedup hash-table workload, 10 runs each.
  for awk in gawk mawk ~/git/bwk/bwk {
    echo ---
    echo $awk
    echo ---
    time for i in {1..10} {
      $awk '
      { 
        line = tolower($0)
        num_lines += 1

        # NOTE: gawk has length(); mawk does not
        if (!(line in unique)) {
          num_unique += 1
        }
        unique[line] += 1
      }
      END {
        print "unique lines: " num_unique
        print "total lines: " num_lines
      }
      ' ${FILES[@]}

    }
  }
}

# Python VM is slower: 160-170 ms.  Oops.
#
# Well Python has more general dictionaries -- they take more than strings.
proc test-python {
  ### Same dedup workload in Python 2 (-S skips site.py startup), 10 runs.
  time for i in {1..10} {
    python -S -c '
import collections
import sys

num_lines = 0
num_unique = 0
unique = collections.defaultdict(int)

for path in sys.argv[1:]:
  with open(path) as f:
    for line in f:
      line = line.lower()
      num_lines += 1

      if line not in unique:
        num_unique += 1
      unique[line] += 1

print "unique lines: ", num_unique
print "total lines: ", num_lines
      ' ${FILES[@]}

  }
}

# Only 10-30 ms.  We are doing real work.
proc test-wc {
  ### Baseline: just cat+wc the same files 10 times (10-30 ms).
  time for i in {1..10} {
    cat ${FILES[@]} | wc -c
  }
}

proc files {
  ### List the benchmark input files and their count.
  echo ${FILES[@]}
  echo "${#FILES[@]} files"
}

@ARGV
    (DONE benchmarks/awk-python.sh)
#!/usr/bin/env bash
#
# Usage:
#   benchmarks/mimalloc.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

# Docs: https://github.com/microsoft/mimalloc

readonly DIR=~/git/oilshell/mimalloc

proc build-ld-preload {
  ### Build the LD_PRELOAD hook shared library and its demo main program.
  gcc -Wall -fPIC -shared -o _tmp/ld_preload_hook.so demo/ld_preload_hook.c -ldl

  gcc -o _tmp/ld_preload_main demo/ld_preload_main.c
}

# 
# These work.  mimalloc doesn't work?
#

# NOTE(review): the stray quotes are a translation artifact of the env-prefix
# form `LD_PRELOAD=_tmp/ld_preload_hook.so _tmp/ld_preload_main`; kept as-is.
proc run-main-hook '{
  LD_PRELOAD=_tmp/ld_preload_hook.so' _tmp/ld_preload_main || true
}

# NOTE(review): the stray quotes are a translation artifact of the env-prefix
# form `LD_PRELOAD=_tmp/ld_preload_hook.so _bin/cxx-dbg/osh -c 'echo hi'`.
proc run-osh-hook '{
  LD_PRELOAD=_tmp/ld_preload_hook.so' _bin/cxx-dbg/osh -c 'echo hi'
}


#
# Mimalloc
#

proc build-mimalloc {
  ### Build mimalloc.so from its unity source file in $DIR.
  pushd $DIR

  # Unity build!
  # -fPIC for shared library
  gcc -O2 -fPIC -I include -o mimalloc.o -c src/static.c
  ls -l mimalloc.*

  # -lpthread required
  gcc -shared -o mimalloc.so mimalloc.o -lpthread

  popd
}

proc build-main {
  ### static build of main + mimalloc

  # Note that alloc.c #includes alloc-override.c

  gcc -O2 -I $DIR/include -o _tmp/mimalloc.o -c $DIR/src/static.c

  gcc -O2 -o _tmp/ld_preload_main.o -c demo/ld_preload_main.c
  file _tmp/ld_preload_main.o

  #gcc -o _tmp/main _tmp/ld_preload_main.o ../mimalloc/mimalloc.o -lpthread
  #gcc -o _tmp/main ../mimalloc/mimalloc.o _tmp/ld_preload_main.o -lpthread

  gcc -o _tmp/main _tmp/mimalloc.o _tmp/ld_preload_main.o -lpthread
  file _tmp/main

  nm _tmp/main | grep -i malloc

  # NOTE(review): translation artifact -- originally `set -x` then
  # `MIMALLOC_VERBOSE=1 _tmp/main` (env prefix); confirm tokenization.
  set -x'
  MIMALLOC_VERBOSE=1' _tmp/main
}

# https://microsoft.github.io/mimalloc/environment.html

# Not working, try STATIC linking
# https://microsoft.github.io/mimalloc/overrides.html

# NOTE(review): the stray quotes are a translation artifact of an env-prefix
# command (MIMALLOC_VERBOSE=1 LD_PRELOAD=... _tmp/ld_preload_main); kept
# byte-for-byte -- confirm before reformatting.
proc run-main-mim '{
  # Doesn't show stats?
  # MIMALLOC_SHOW_STATS=1 LD_PRELOAD=$DIR/mimalloc.so ls

  # Works
  MIMALLOC_VERBOSE=1'" LD_PRELOAD=$DIR/mimalloc.so" \
    _tmp/ld_preload_main
}

proc run-osh-mim {
  ### Build and run osh under mimalloc via LD_PRELOAD (args are forwarded).
  local osh=_bin/cxx-opt/osh

  #local osh=_bin/cxx-opt/mycpp/demo/gc_header

  #local osh=_bin/cxx-dbg/osh

  # NOTE(review): translation artifact -- env prefix folded into quoted strings.
  ninja $osh'
  #export MIMALLOC_SHOW_STATS=1
  MIMALLOC_VERBOSE=1'" LD_PRELOAD=$DIR/mimalloc.so" \
     $osh @ARGV
}

# No stats?
proc osh-demo {
  ### Exercise run-osh-mim with an allocating loop (prints 1..1000).
  run-osh-mim -c 'for i in $(seq 1000); do echo $i; done'
}



@ARGV
    (DONE benchmarks/mimalloc.sh)
#!/usr/bin/env bash
#
# Analyze how mycpp speeds up programs.
#
# Usage:
#   benchmarks/mycpp.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd $(dirname $0)/.. && pwd)
readonly REPO_ROOT

source benchmarks/common.sh
source build/dev-shell.sh  # R_LIBS_USER
source soil/common.sh  # find-dir-html
source test/tsv-lib.sh  # tsv2html

proc print-report {
  ### Render the mycpp code-generation benchmark report from stage dir $1.
  local in_dir=$1

  benchmark-html-head 'mycpp Code Generation'

  cat <<< """
  <body class="width60">
    <p id="home-link">
      <a href="/">oilshell.org</a>
    </p>
"""
  cmark <<< """

## mycpp Code Generation

Measure the speedup from mycpp, and the resource usage.

Source code: [oil/mycpp/examples](https://github.com/oilshell/oil/tree/master/mycpp/examples)

"""

  cmark <<< """
### User Time (milliseconds)

Lower ratios are better.

"""

  tsv2html $in_dir/user_time.tsv

  cmark <<< """
  ### Max Resident Set Size (MB)

Lower ratios are better.  We use MB (powers of 10), not MiB (powers of 2).

"""

  tsv2html $in_dir/max_rss.tsv

  cmark <<< """
### System Time (milliseconds)

Lower ratios are better.

"""

  tsv2html $in_dir/sys_time.tsv


  # This file is benchmarks.wwz/mycpp-examples/ or _tmp/mycpp-examples/
  # The link only exists in the latter case
  cmark <<< '''
---
[raw benchmark files](raw/benchmark/index.html)

'''


# Dead branch kept for reference (note the unindented block).
if false {
  cmark <<< """
### Details

"""

  tsv2html $in_dir/details.tsv
}

  cat <<< """
  </body>
</html>
"""
}

proc soil-run {
  # Run and report mycpp/examples BENCHMARKS only.

  local base_dir=${1:-_tmp/mycpp-examples}
  local in_tsv=_test/benchmark-table.tsv

  # Force SERIAL reexecution of benchmarks
  # Notes:
  # - This is why benchmarks don't really belong in Ninja?
  # - mycpp/TEST.sh test-translator does 'mycpp-logs-equal', which also runs
  #   tests

  local task_dir=_test/tasks/benchmark
  rm -r -f --verbose $task_dir
  ninja -j 1 $in_tsv

  # Stage raw inputs, then build stage2 TSVs and the stage3 HTML report.
  mkdir -p $base_dir/raw
  cp -v $in_tsv $base_dir/raw
  cp -R $task_dir/ $base_dir/raw/benchmark/

  local dir2=$base_dir/stage2
  mkdir -p $dir2

  benchmarks/report.R mycpp $base_dir/raw $dir2

  benchmarks/report.sh stage3 $base_dir mycpp

  # The data is in _test/tasks; we could move it to _test/benchmarks/mycpp/ or
  # something
  find-dir-html $base_dir/raw/benchmark
}

@ARGV
    (DONE benchmarks/mycpp.sh)
#!/usr/bin/env bash
#
# Usage:
#   benchmarks/gc.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)

source benchmarks/common.sh  # benchmark-html-head
source benchmarks/cachegrind.sh  # with-cachegrind
source build/dev-shell.sh  # R_LIBS_USER
source test/tsv-lib.sh

readonly BASE_DIR=_tmp/gc

# duplicated in benchmarks/gc-cachegrind.sh
readonly BASE_DIR_CACHEGRIND=_tmp/gc-cachegrind

# See benchmarks/gperftools.sh.  I think the Ubuntu package is very old

proc download-tcmalloc {
  ### Fetch the gperftools (tcmalloc) source tarball into _deps.
  # TODO: move this to ../oil_DEPS ?
  wget --directory _deps \
    https://github.com/gperftools/gperftools/releases/download/gperftools-2.10/gperftools-2.10.tar.gz

  # Then ./configure; make; sudo make install
  # installs in /usr/local/lib

  # Note: there's a warning about libunwind -- maybe install that first.  Does
  # it only apply to CPU profiles?
}

proc debug-tcmalloc {
  ### Inspect linking/symbols of the opt vs opt+tcmalloc osh binaries.
  touch mycpp/marksweep_heap.cc

  # No evidence of difference
  for bin in _bin/cxx-{opt,opt+tcmalloc}/osh {
    echo $bin
    ninja $bin

    ldd $bin
    echo

    ls -l $bin
    echo

    # Check what we're linking against
    nm $bin | egrep -i 'malloc|calloc'
    #wc -l
    echo
  }
}

proc install-m32 {
  ### Install multilib packages required for -m32 builds.
  # needed to compile with -m32
  sudo apt-get install gcc-multilib g++-multilib
}

proc max-rss {
  ### Run argv under /usr/bin/time, printing elapsed time and max RSS.
  # %e is real time
  /usr/bin/time --format '%e %M' -- @ARGV
}

proc compare-m32 {
  ### Compare 64-bit vs 32-bit osh builds: linkage, size, and max RSS.
  for bin in _bin/cxx-opt{,32}/osh {
    echo $bin
    ninja $bin

    ldd $bin
    echo

    file $bin
    echo

    ls -l $bin
    echo

    # 141136 KiB vs. 110924 KiB.  Significant savings, but it's slower.
    max-rss $bin --ast-format none -n benchmarks/testdata/configure-coreutils

  }
}

proc banner {
  ### Print a ----- separator followed by the arguments.
  echo -----
  echo @ARGV
}

proc print-tasks {
  ### Emit one TSV task row (join_id, workload, shell, runtime_opts) per line.
  local -a workloads=(
    parse.configure-coreutils
    parse.configure-cpython
    parse.abuild
    ex.bashcomp-parse-help  # only runs with bash
    ex.abuild-print-help  # bash / dash / zsh
    ex.compute-fib  # bash / dash / zsh
  )

  local -a shells=(
    "bash$TAB-"
    "dash$TAB-"
    "zsh$TAB-"

    "_bin/cxx-opt+bumpleak/osh${TAB}mut"
    "_bin/cxx-opt+bumproot/osh${TAB}mut"

    "_bin/cxx-opt+bumpsmall/osh${TAB}mut+alloc"
    "_bin/cxx-opt+nopool/osh${TAB}mut+alloc"
    "_bin/cxx-opt+nopool/osh${TAB}mut+alloc+free+gc"

    # these have trivial GC stats
    "_bin/cxx-opt/osh${TAB}mut+alloc"
    "_bin/cxx-opt/osh${TAB}mut+alloc+free"
    # good GC stats
    "_bin/cxx-opt/osh${TAB}mut+alloc+free+gc"
    "_bin/cxx-opt/osh${TAB}mut+alloc+free+gc+exit"
  )

  # NOTE(review): when TCMALLOC is set this REPLACES the shell list (the ''
  # prefix is a translation artifact of array assignment) -- confirm intended.
  if test -n ${TCMALLOC:-} {
    setvar shells = ''(
      "_bin/cxx-opt+tcmalloc/osh${TAB}mut+alloc"
      "_bin/cxx-opt+tcmalloc/osh${TAB}mut+alloc+free"
      "_bin/cxx-opt+tcmalloc/osh${TAB}mut+alloc+free+gc"
    )
  }

  local id=0

  for workload in "${workloads[@]}" {
    for shell in "${shells[@]}" {
      local row_part="$workload${TAB}$shell"

      # Skip these rows
      case (row_part) {
        "ex.bashcomp-parse-help${TAB}dash"* {
          continue
          }
        "ex.bashcomp-parse-help${TAB}zsh"* {
          continue
          }
      }

      local join_id="gc-$id"
      local row="$join_id${TAB}$row_part"
      echo $row

      setvar id = $((id + 1))

    }

    # Run a quick 10 tasks
    if test -n ${QUICKLY:-} && test $id -gt 10 {
      break
    }
  }
}

proc print-cachegrind-tasks {
  ### Emit task rows for the (smaller, faster) cachegrind-instrumented runs.
  local -a workloads=(
    # coreutils is on osh-parser
    #parse.configure-coreutils

    #parse.configure-cpython

    # Faster tasks, like benchmarks/uftrace, which is instrumented
    parse.abuild
    ex.compute-fib
  )

  local -a shells=(
    "bash${TAB}-"
    "_bin/cxx-opt+bumpleak/osh${TAB}mut"
    "_bin/cxx-opt+bumproot/osh${TAB}mut"

    "_bin/cxx-opt+bumpsmall/osh${TAB}mut+alloc"
    "_bin/cxx-opt+nopool/osh${TAB}mut+alloc"
    "_bin/cxx-opt+nopool/osh${TAB}mut+alloc+free+gc"

    "_bin/cxx-opt/osh${TAB}mut+alloc"
    "_bin/cxx-opt/osh${TAB}mut+alloc+free"
    "_bin/cxx-opt/osh${TAB}mut+alloc+free+gc"
  )

  local id=0
  for workload in "${workloads[@]}" {
    for shell in "${shells[@]}" {
      local row_part="$workload${TAB}$shell"

      local join_id="cachegrind-$id"
      local row="$join_id${TAB}$row_part"
      echo $row

      setvar id = $((id + 1))
    }
  }
  #print-tasks | egrep 'configure-coreutils' | egrep osh
}


# 1 GB allocation threshold: effectively disables GC for the mut+alloc modes.
readonly BIG_THRESHOLD=$(( 1 * 1000 * 1000 * 1000 ))  # 1 B

proc run-tasks {
  ### Read task rows from stdin and run each, appending timing rows to $1.
  # $2 = mode: 'time' (default) or 'cachegrind'.
  # NOTE(review): the stray quotes in several case arms below are translation
  # artifacts of env-prefix commands (e.g. OILS_GC_STATS=1 cmd); kept as-is.
  local tsv_out=$1
  local mode=${2:-time}

  while read -r join_id task sh_path shell_runtime_opts {

    # Parse different files
    case (task) {
      parse.configure-coreutils {
        setvar data_file = ''benchmarks/testdata/configure-coreutils''
        }
      parse.configure-cpython {
        setvar data_file = ''Python-2.7.13/configure''
        }
      parse.abuild {
        setvar data_file = ''benchmarks/testdata/abuild''
        }
    }

    # Construct argv for each task
    local -a argv
    case (task) {
      parse.* {
        setvar argv = ''( -n $data_file )

        case (sh_path) {
          _bin/*/osh {
            setvar argv = ''( --ast-format none "${argv[@]}" )
            }
        }
        }

      ex.bashcomp-parse-help {
        setvar argv = ''( benchmarks/parse-help/pure-excerpt.sh parse_help_file 
               benchmarks/parse-help/clang.txt )
        }

      ex.abuild-print-help {
        setvar argv = ''( testdata/osh-runtime/abuild -h )
        }

      ex.compute-fib {
        # fewer iterations when instrumented
        local iters
        if test $mode = time {
          setvar iters = '100'
        } else {
          setvar iters = '10'
        }

        setvar argv = ''( benchmarks/compute/fib.sh $iters 44 )
        }

      * {
        die "Invalid task $task"
        }
    }

    echo $join_id $task $sh_path $shell_runtime_opts

    setvar argv = ''( $sh_path "${argv[@]}" )
    #echo + "${argv[@]}"
    #set -x

    if test $mode = cachegrind {
      # Add prefix
      setvar argv = ''( $0 with-cachegrind $BASE_DIR_CACHEGRIND/raw/$join_id.txt "${argv[@]}" )
    }

    # Wrap in a command that writes one row of a TSV
    # Note: for cachegrind, we need the join ID, but the --rusage is meaningless
    local -a instrumented=(
      time-tsv -o $tsv_out --append 
        --rusage
        --field "$join_id" --field "$task" --field "$sh_path"
        --field "$shell_runtime_opts"
        -- "${argv[@]}"
    )

    # Run with the right environment variables

    case (shell_runtime_opts) { 
      - {
        ${instrumented[@]} > /dev/null
        }
      mut {'
        OILS_GC_STATS=1' \
          ${instrumented[@]} > /dev/null
        }
      mut+alloc {'
        # disable GC with big threshold
        OILS_GC_STATS=1'" OILS_GC_THRESHOLD=$BIG_THRESHOLD" \
          ${instrumented[@]} > /dev/null
        }
      mut+alloc+free {'
        # do a single GC on exit
        OILS_GC_STATS=1'" OILS_GC_THRESHOLD=$BIG_THRESHOLD"' OILS_GC_ON_EXIT=1' \
          ${instrumented[@]} > /dev/null
        }
      mut+alloc+free+gc {
        # Default configuration
        #
        # Save the GC stats here.  None of the other runtime options are that
        # interesting.

        if test $mode = 'time' && test $sh_path != _bin/cxx-opt+nopool/osh {'
          OILS_GC_STATS_FD=99' \
            ${instrumented[@]} > /dev/null 99>$BASE_DIR/raw/$join_id.txt
        } else {
          ${instrumented[@]} > /dev/null
        }
        }
      mut+alloc+free+gc+exit {'
        # also GC on exit
        OILS_GC_STATS=1'' OILS_GC_ON_EXIT=1' \
          ${instrumented[@]} > /dev/null
        }

      * {
        die "Invalid shell runtime opts $shell_runtime_opts"
        }
    }

  }

  # TODO: OILS_GC_STATS_FD and tsv_column_from_files.py
}

proc fd-demo {
  ### Demo: route GC stats out of fd 99 into a file.
  local out=_tmp/gc/demo.txt

  local bin=_bin/cxx-dbg/oils-for-unix
  ninja $bin

  # Hm you can't do $fd>out.txt, but that's OK
  # NOTE(review): quoting artifact below; also the "configure99>$out" line
  # looks like an accidental duplication of the following line -- confirm.
  local fd=99"

  OILS_GC_STATS_FD=$fd" 99>$out \
    $bin --ast-format none -n benchmarks/testdata/configure99>$out \
    $bin --ast-format none -n benchmarks/testdata/configure

  ls -l $out
  cat $out
}

proc more-variants {
  # TODO: could revive this
  # NOTE(review): dead code -- references $compare_more, $tsv_out,
  # $big_threshold and $file, none of which are defined here.  The stray
  # quotes are translation artifacts of env-prefix commands; kept as-is.

  case (compare_more) {
    (*m32* {
      # Surprisingly, -m32 is SLOWER, even though it allocates less.
      # My guess is because less work is going into maintaining this code path in
      # GCC.

      # 223 ms
      # 61.9 MB bytes allocated
      local bin=_bin/cxx-opt32/oils-for-unix"
      OILS_GC_THRESHOLD=$big_threshold" \
        run-osh $tsv_out $bin 'm32 mutator+malloc' $file'

      # 280 ms
      OILS_GC_STATS=1' \
        run-osh $tsv_out $bin 'm32 mutator+malloc+free+gc' $file
      }
  }

  # Show log of GC
  case (compare_more) {
    (*gcverbose* {
      local bin=_bin/cxx-gcverbose/oils-for-unix'
      # 280 ms
      OILS_GC_STATS=1'' OILS_GC_ON_EXIT=1' \
        run-osh $tsv_out $bin 'gcverbose mutator+malloc+free+gc' $file
      }
  }

  if command -v pretty-tsv {
    pretty-tsv $tsv_out
  }
}

proc build-binaries {
  ### Ninja-build every osh variant used by the GC benchmarks.
  local -a bin=( _bin/cxx-opt{,+bumpleak,+bumproot,+bumpsmall,+nopool}/osh )

  # NOTE(review): when TCMALLOC is set this REPLACES the list rather than
  # appending ('' prefix is a translation artifact) -- confirm intended.
  if test -n ${TCMALLOC:-} {
    setvar bin = ''( _bin/cxx-opt+tcmalloc/osh )
  }
  ninja ${bin[@]}
}

proc measure-all {
  ### Build binaries, then run all timed GC tasks into $1 (a times.tsv).
  build-binaries

  local tsv_out=${1:-$BASE_DIR/raw/times.tsv}
  mkdir -p $(dirname $tsv_out)

  # Make the header
  time-tsv -o $tsv_out --print-header \
    --rusage --field join_id --field task --field sh_path --field shell_runtime_opts

  time print-tasks | run-tasks $tsv_out

  if command -v pretty-tsv {
    pretty-tsv $tsv_out
  }
}

proc measure-cachegrind {
  ### Build binaries, then run the cachegrind task set into $1 (a times.tsv).
  build-binaries

  local tsv_out=${1:-$BASE_DIR_CACHEGRIND/raw/times.tsv}

  mkdir -p $(dirname $tsv_out)

  # Make the header
  time-tsv -o $tsv_out --print-header \
    --rusage --field join_id --field task --field sh_path --field shell_runtime_opts

  print-cachegrind-tasks | run-tasks $tsv_out cachegrind

  # TODO: join cachegrind columns

  if command -v pretty-tsv {
    pretty-tsv $tsv_out
  }
}

proc print-report {
  ### Render the GC / memory-management HTML report from stage dir $1.
  local in_dir=$1

  benchmark-html-head 'Memory Management Overhead'

  cat <<< """
  <body class="width60">
    <p id="home-link">
      <a href="/">oilshell.org</a>
    </p>
"""

  cmark <<< '''
## Memory Management Overhead

Source code: [oil/benchmarks/gc.sh](https://github.com/oilshell/oil/tree/master/benchmarks/gc.sh)
'''

  cmark <<< '''
### GC Stats

'''

  tsv2html $in_dir/gc_stats.tsv

  cmark <<< '''

- Underlying data: [stage2/gc_stats.tsv](stage2/gc_stats.tsv)
- More columns: [stage1/gc_stats.tsv](stage1/gc_stats.tsv)

### Resource Usage

#### parse.configure-cpython

'''

  tsv2html $in_dir/parse.configure-cpython.tsv

  cmark <<< '''
#### parse.configure-coreutils

Parsing the autoconf-generated `configure` script from GNU coreutils.

Note that unlike other shells, `osh -n` retains all nodes on purpose.  (See the
[parser benchmark](../osh-parser/index.html)).

'''

  tsv2html $in_dir/parse.configure-coreutils.tsv

  cmark <<< '''
#### parse.abuild

Parsing `abuild` from Alpine Linux.
'''

  tsv2html $in_dir/parse.abuild.tsv

  cmark <<< '''
#### ex.compute-fib

A synthetic benchmark for POSIX shell arithmetic.
'''

  tsv2html $in_dir/ex.compute-fib.tsv

  cmark <<< '''
#### ex.bashcomp-parse-help

A realistic `bash-completion` workload.
'''

  tsv2html $in_dir/ex.bashcomp-parse-help.tsv

  cmark <<< '''
#### ex.abuild-print-help

Running `abuild -h` from Alpine Linux.

'''

  tsv2html $in_dir/ex.abuild-print-help.tsv

  cmark <<< '''
- Underlying data: [stage2/times.tsv](stage2/times.tsv)
'''

  cat <<< """

  </body>
</html>
"""
}

proc make-report {
  ### Build TSV (stage1/stage2) and HTML (stage3) reports under $BASE_DIR.
  mkdir -p $BASE_DIR/{stage1,stage2}

  # Concatenate tiny files
  benchmarks/gc_stats_to_tsv.py $BASE_DIR/raw/gc-*.txt \
    > $BASE_DIR/stage1/gc_stats.tsv

  # Make TSV files
  benchmarks/report.R gc $BASE_DIR $BASE_DIR/stage2

  # Make HTML
  benchmarks/report.sh stage3 $BASE_DIR
}

proc soil-run {
  ### Run in soil/benchmarks

  # Measure first, then turn the raw output into the HTML report
  measure-all

  make-report
}

#
# Misc Tests
#

proc gc-parse-smoke {
  ### Parse a file under the GC runtime, printing verbose GC stats.
  local variant=${1:-opt}     # ninja build variant, e.g. opt or dbg
  local file=${2:-configure}  # script to parse

  local bin=_bin/cxx-$variant/osh
  ninja $bin

  # Fixed translation artifact: stray quotes were attached to 'ninja $bin' and
  # the env-prefix bindings below (see the intended bash form in run-for-a-long-time).
  # OILS_GC_THRESHOLD=1000 OILS_GC_ON_EXIT=1 \
  time _OILS_GC_VERBOSE=1 OILS_GC_STATS=1 \
    $bin --ast-format none -n $file

  # No leaks
  # OILS_GC_STATS=1 OILS_GC_THRESHOLD=1000 OILS_GC_ON_EXIT=1 $bin -n -c '('
}

proc gc-parse-big {
  ### Like gc-parse-smoke, but on the large coreutils configure script.
  local variant=${1:-opt}

  gc-parse-smoke $variant benchmarks/testdata/configure-coreutils
}

proc gc-run-smoke {
  ### Run a printf loop under the GC runtime with a tiny GC threshold.
  local variant=${1:-opt}  # ninja build variant

  local bin=_bin/cxx-$variant/oils-for-unix
  ninja $bin

  # Fixed translation artifact: stray quotes in the env-prefix bindings.
  # expose a bug with printf
  _OILS_GC_VERBOSE=1 OILS_GC_STATS=1 OILS_GC_THRESHOLD=500 OILS_GC_ON_EXIT=1 \
    $bin -c 'for i in $(seq 100); do printf "%s\\n" "-- $i"; done'
}

proc gc-run-oil {
  ### Run some scripts from the repo

  local variant=${1:-opt}

  local bin=_bin/cxx-$variant/oils-for-unix
  ninja $bin

  local i=0
  for script in */*.sh {
    # Fixed translation artifacts in this loop: a stray '(' in the case
    # pattern, a stray quote inside the echo string, and stray quotes on the
    # env-prefix bindings.
    case (script) {
      build/clean.sh|build/common.sh|build/dev.sh {
        # Top level does something!
        echo "=== SKIP $script"
        continue
        }
    }

    echo
    echo "=== ($i) $script"

    # Just run the top level, which (hopefully) does nothing
    _OILS_GC_VERBOSE=1 OILS_GC_STATS=1 OILS_GC_THRESHOLD=1000 OILS_GC_ON_EXIT=1 \
      $bin $script

    setvar i = $((i + 1))
    # Cap the number of scripts run
    if test $i -gt 60 {
      break
    }
  }
}

proc gc-run-big {
  ### Run the big CPython configure script under the GC runtime, timed.
  local variant=${1:-opt}  # ninja build variant

  local target=_bin/cxx-$variant/oils-for-unix
  ninja $target

  # Absolute path, because we pushd below
  local osh=$REPO_ROOT/$target

  local dir=_tmp/gc-run-big
  rm -r -f -v $dir
  mkdir -v -p $dir

  # Fixed translation artifact: stray quotes on 'pushd $dir' and the
  # env-prefix bindings.
  pushd $dir
  time _OILS_GC_VERBOSE=1 OILS_GC_STATS=1 OILS_GC_THRESHOLD=100000 OILS_GC_ON_EXIT=1 \
    $osh ../../Python-2.7.13/configure
  popd
}

proc run-verbose {
  ### Run argv with GC stats enabled, reporting max RSS via /usr/bin/time.
  # Fixed translation artifact: a stray quote after 'run-verbose' broke the
  # proc definition, and stray quotes corrupted the env-prefix bindings.
  _OILS_GC_VERBOSE=1 OILS_GC_STATS=1 \
    /usr/bin/time --format '*** MAX RSS KiB = %M' -- \
    @ARGV
}

# This hit the 24-bit object ID limitation in 2.5 seconds
# Should be able to run indefinitely.
proc run-for-a-long-time {
  ### Long-running fib workload; regression for the 24-bit object ID limit.
  local bin=_bin/cxx-opt/osh
  ninja $bin
  # 10000 iterations of the fib benchmark
  run-verbose $bin benchmarks/compute/fib.sh 10000

  # time _OILS_GC_VERBOSE=1 OILS_GC_STATS=1 _bin/cxx-opt/osh benchmarks/compute/fib.sh 10000
}

proc while-loop {
  ### While-loop workload; prints every 1000th iteration up to 10000.
  local i=0
  while test $i -lt 10000 {
    if ((i % 1000 == 0))  {
      echo $i
    }
    setvar i = $((i + 1))
    continue  # BUG: skipped GC point
  }
}

proc for-loop {
  ### For-loop workload over seq output; prints every 1000th iteration.
  for i in $(seq 10000) {
    if ((i % 1000 == 0))  {
      echo $i
    }
    continue
  }
}

proc recurse {
  ### Recurse n levels deep, printing every 100th level.
  local n=${1:-3000}

  if ((n % 100 == 0))  {
    echo $n
  }

  # Base case: stop at zero
  if test $n = 0 {
    return
  }

  recurse $((n - 1))
}

proc test-loops {
  ### Regression for leak

  local bin=_bin/cxx-opt/osh
  ninja $bin

  # Run each loop workload from this script ($0) under run-verbose
  run-verbose $bin $0 recurse
  echo

  run-verbose $bin $0 while-loop
  echo

  run-verbose $bin $0 for-loop
}

proc expand-loop {
  ### Time brace expansion of {1..n} in OSH.
  local n=$1  # number of items to expand

  local bin=_bin/cxx-opt/osh
  ninja $bin

  # Fixed translation artifact: stray quotes on 'set -x' and the env-prefix
  # bindings.
  set -x
  time _OILS_GC_VERBOSE=1 OILS_GC_STATS=1 \
    $bin -c "for i in {1..$n}; do echo \$i; done > /dev/null"
  set +x
}

proc test-brace-exp {
  ### Run expand-loop at two sizes to compare GC behavior.
  expand-loop 330000
  expand-loop 340000
}

@ARGV
    (DONE benchmarks/gc.sh)
#!/usr/bin/env bash
#
# Following:
# https://docs.python.org/3/howto/instrumentation.html
#
# Couldn't get this to work.  Even building it from source doesn't work!
# 'stap' invokes a compiler, and I get compiler errors.
#
# It appears to be very brittle.
#
# https://stackoverflow.com/questions/46047270/systemtap-error-on-ubuntu
#
# Usage:
#   ./systemtap.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

proc deps {
  # 'stap' lives in systemtap package
  sudo apt install systemtap systemtap-sdt-dev
}

proc stap-deps {
  # For DWARF debugging info, interesting.
  sudo apt install libdw-dev libdw1
}

# NOTE: systemtap-3.2 is out, but doesn't compile on Ubuntu xenial!
proc download {
  wget --no-clobber --directory _tmp \
    https://sourceware.org/systemtap/ftp/releases/systemtap-3.1.tar.gz
}

proc extract {
  cd _tmp
  tar -x -z < systemtap-3.1.tar.gz
}

readonly PY36=~/src/languages/Python-3.6.1

proc build-python {
  pushd $PY36
  # There is no --with-systemtap/
  ./configure --with-dtrace
  make -j 7
  popd
}

# Default Python build doesn't have it
proc elf {
  readelf -n $(which python3)
  echo ---
  # Now this has "stapsdt" -- SystemTap probe descriptors.
  readelf -n $PY36/python
}

proc _demo {
  #local stp="$PWD/benchmarks/call-hierarchy.stp"

  # C compile errors?  It's getting further.
  #local stp="$PY36/Lib/test/dtracedata/call_stack.stp"
  local stp="$PY36/Lib/test/dtracedata/gc.stp"
  #local stp="$PY36/Lib/test/dtracedata/assert_usable.stp"

  local py="$PWD/test/sh_spec.py"

  pushd $PY36
  stap -v $stp -c "./python $py"
  popd
}
proc demo { sudo $0 _demo; }

@ARGV
    (DONE benchmarks/systemtap.sh)
#!/usr/bin/env bash
#
# Usage:
#   benchmarks/callgrind.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

proc fib {
  ### Run the fib benchmark under callgrind (inspect with kcachegrind).
  # Hm dbg build seems to give more exact info
  local osh=_bin/cxx-dbg/osh
  #local osh=_bin/cxx-opt/osh

  ninja $osh

  # args: 10 iterations of fib(44)
  valgrind --tool=callgrind \
    $osh benchmarks/compute/fib.sh 10 44
}

proc install-kcachegrind {
  sudo apt-get install kcachegrind
}

@ARGV
    (DONE benchmarks/callgrind.sh)
#!/usr/bin/env bash
#
# Benchmarks for small shell snippets.
#
# Usage:
#   ./micro.sh <function name>
#
# TODO: Publish and HTML report with every release.

set -o nounset
set -o pipefail
set -o errexit

# OSH:  583 ms
# bash: 40 ms
# ~10 x
proc assign-loop {
  ### Time a 10000-iteration echo loop piped to wc.
  time for i in $(seq 10000) {
    echo x
  } | wc -l
}

# OSH: 176 ms
# bash: 2 ms!
# This is probably mostly because printf is external!
# ~80x
proc printf-loop {
  ### Time printf in a read loop over 100 lines.
  time seq 100 | while read line {
    printf '%s\n' $line
  } | wc -l
}

# This microbenchmark justifies the parse_cache member in
# osh/builtin_printf.py.
#
# With the cache, it runs in ~150 ms.
# Without, it runs in ~230 ms.

proc printf-loop-complex {
  ### Time a printf loop with a more complex format string (exercises parse_cache).
  time seq 1000 | while read line {
    printf 'hello \t %s \t %q\n' $line 'x y'
  } | wc -l
}

@ARGV
    (DONE benchmarks/micro.sh)
#!/usr/bin/env bash
#
# cachegrind gives instruction counts
#
# Usage:
#   benchmarks/cachegrind.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

source benchmarks/common.sh

proc with-cachegrind {
  ### Run a command under cachegrind, writing to $out_file
  local out_file=$1
  shift

  # Discard the simulation output file; only the log (instruction counts) is kept
  valgrind --tool=cachegrind \
    --log-file=$out_file \
    --cachegrind-out-file=/dev/null \
    -- @ARGV

  log "Wrote $out_file"
}

setvar file = $(basename $0)
if test $file = 'cachegrind.sh' {
  @ARGV
}
    (DONE benchmarks/cachegrind.sh)
#!/usr/bin/env bash
#
# Run all the benchmarks on a given machine.
#
# Usage:
#   benchmarks/auto.sh <function name>
#
# List of benchmarks:
#
# - Single Machine (for now):
#   - mycpp-examples
#   - gc
# - Multiple machines
#   - osh-parser
#   - osh-runtime
#   - vm-baseline
#   - compute
#     - awk-python could be moved here
#     - startup.sh could be moved here, it also has strace counts
#   - ovm-build

set -o nounset
set -o pipefail
set -o errexit

source test/common.sh  # die
source benchmarks/common.sh  # default value of OSH_OVM
source benchmarks/id.sh

proc measure-shells {
  ### Run the per-shell benchmark suites (runtime, vm, parser, compute) for one host.
  local host_name=$1
  local job_id=$2
  local out_dir=$3

  local host_job_id="$host_name.$job_id"

  local raw_out_dir
  setvar raw_out_dir = ""$out_dir/osh-runtime/raw.$host_job_id""

  # New Style doesn't need provenance -- it's joined later
  benchmarks/osh-runtime.sh measure \
    $host_name $raw_out_dir $OSH_CPP_BENCHMARK_DATA $out_dir

  # Old style uses provenance.txt.  TODO: use raw_out_dir everywhere
  local provenance=_tmp/provenance.txt

  setvar raw_out_dir = ""$out_dir/vm-baseline/raw.$host_job_id""
  benchmarks/vm-baseline.sh measure \
    $provenance $host_job_id $out_dir/vm-baseline

  # Fixed copy-paste bug: this previously reused the vm-baseline raw dir; the
  # osh-parser stage gets its own, matching the compute stage below.
  setvar raw_out_dir = ""$out_dir/osh-parser/raw.$host_job_id""
  benchmarks/osh-parser.sh measure \
    $provenance $host_job_id $out_dir/osh-parser

  setvar raw_out_dir = ""$out_dir/compute/raw.$host_job_id""
  benchmarks/compute.sh measure \
    $provenance $host_job_id $out_dir/compute
}

proc measure-builds {
  local host_name=$1
  local job_id=$2
  local out_dir=$3

  # TODO: Use new provenance style, like measure-shells
  local build_prov
  setvar build_prov = $(benchmarks/id.sh compiler-provenance)  # capture the filename

  benchmarks/ovm-build.sh measure $build_prov $out_dir/ovm-build
}

# Run all benchmarks from a clean git checkout.
# Before this, run devtools/release.sh benchmark-build.

proc all {
  local do_machine1=${1:-}

  local host_name
  setvar host_name = $(hostname)  # Running on multiple machines

  local job_id
  setvar job_id = $(print-job-id)

  local host_job_id="$host_name.$job_id"
  local out_dir='../benchmark-data'

  benchmarks/id.sh shell-provenance-2 \
    $host_name $job_id $out_dir \
    ${SHELLS[@]} $OSH_CPP_BENCHMARK_DATA python2

  # TODO: probably move compiler-provenance here

  # Notes:
  # - During release, this happens on machine1, but not machine2
  if test -n $do_machine1 {
    # Only run on one machine
    benchmarks/uftrace.sh soil-run
    benchmarks/mycpp.sh soil-run
    benchmarks/gc.sh soil-run
    benchmarks/gc-cachegrind.sh soil-run

    benchmarks/osh-parser.sh measure-cachegrind \
      _tmp/provenance.txt $host_job_id $out_dir/osh-parser $OSH_CPP_BENCHMARK_DATA
  }

  measure-shells $host_name $job_id $out_dir
  measure-builds $host_name $job_id $out_dir
}

@ARGV
    (DONE benchmarks/auto.sh)
#!/usr/bin/env bash
#
# Take stable measurements of GC
#
# Usage:
#   benchmarks/gc-cachegrind.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)

source benchmarks/common.sh
source build/dev-shell.sh  # $R_LIBS_USER
source test/tsv-lib.sh

readonly BASE_DIR=_tmp/gc-cachegrind

proc print-report {
  local in_dir=$1

  benchmark-html-head 'Memory Management (stable measurements)'

  cat <<< """
  <body class="width60">
    <p id="home-link">
      <a href="/">oilshell.org</a>
    </p>
"""

  cmark <<< '''
## Memory Management (stable measurements)

Source code: [oil/benchmarks/gc-cachegrind.sh](https://github.com/oilshell/oil/tree/master/benchmarks/gc-cachegrind.sh)
'''

  cmark <<< '''
#### parse.abuild

'''

  tsv2html $in_dir/parse.abuild.tsv

  cmark <<< '''
#### ex.compute-fib

'''

  tsv2html $in_dir/ex.compute-fib.tsv


  cat <<< """

  </body>
</html>
"""
}


proc make-report {
  mkdir -p $BASE_DIR/{stage1,stage2}

  # Concatenate tiny files
  benchmarks/cachegrind_to_tsv.py $BASE_DIR/raw/cachegrind-*.txt \
    > $BASE_DIR/stage1/cachegrind.tsv

  #pretty-tsv $BASE_DIR/stage1/cachegrind.tsv

  # Make TSV files
  benchmarks/report.R gc-cachegrind $BASE_DIR $BASE_DIR/stage2

  #pretty-tsv $BASE_DIR/stage2/counts.tsv

  # Make HTML
  benchmarks/report.sh stage3 $BASE_DIR
}


proc soil-run {
  ### Run in soil/benchmarks2 (stable timings)

  benchmarks/gc.sh measure-cachegrind

  make-report
}


@ARGV
    (DONE benchmarks/gc-cachegrind.sh)
#!/usr/bin/env bash
#
# Usage:
#   benchmarks/compute/control_flow.sh <function name>

# Each of these 3 functions is a double loop that computes roughly n^2.

proc do_continue {
  ### n^2 double loop whose inner body ends with a no-op 'continue'.
  local n=$1
  local outer=0
  local total=0

  while test $outer -lt $n {
    local inner=0

    while test $inner -lt $n {
      setvar inner = $(( inner + 1 ))
      setvar total = $((total + 1))

      # This NO-OP continue penalizes OSH!  It's almost as fast as bash without
      # it, but then becomes twice as slow.

      continue
    }

    setvar outer = $(( outer + 1 ))

  }

  echo "    sum=$total"
}

proc do_break {
  ### n^2 double loop with an extra conditional 'break' in the inner loop.
  local n=$1
  local outer=0
  local total=0

  while test $outer -lt $n {
    local inner=0

    while test $inner -lt $n {
      setvar inner = $(( inner + 1 ))
      setvar total = $((total + 1))

      # Extra break statement!
      if test $inner -eq $outer {
        break;
      }
      continue
    }

    setvar outer = $(( outer + 1 ))

  }

  echo "    sum=$total"
}



proc do_neither {
  ### n^2 double loop with no control-flow statements, as the baseline.
  local n=$1
  local outer=0
  local total=0

  while test $outer -lt $n {
    local inner=0

    while test $inner -lt $n {
      setvar inner = $(( inner + 1 ))
      setvar total = $((total + 1))
    }

    setvar outer = $(( outer + 1 ))

  }

  echo "    sum=$total"
}

@ARGV
    (DONE benchmarks/compute/control_flow.sh)
#!/usr/bin/env bash
#
# Usage:
#   ./bubble_sort.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

# Fix the lexicographical comparisons!!!
setvar LANG = 'C'

proc main {
  mapfile seq

  #echo ${#seq[@]}

  if test $1 = 'int' {
    # Sort by integer value
    local changed=T
    while test -n $changed {
      setvar changed = ''''
      for (( i = 0; i < ${#seq[@]} - 1; ++i )); do
        if (( seq[i] > seq[i+1] )); then
          tmp=${seq[i+1]}
          seq[i+1]=${seq[i]}
          seq[i]=$tmp
          changed=T
        fi
      done
    }

  } else {
    # Sort by bytes
    local changed=T
    while test -n $changed {
      setvar changed = ''''
      for (( i = 0; i < ${#seq[@]} - 1; ++i )); do
        # LANG=C required here
        if [[ ${seq[i]} > ${seq[i+1]} ]]; then
          tmp=${seq[i+1]}
          seq[i+1]=${seq[i]}
          seq[i]=$tmp
          changed=T
        fi
      done
    }
  }

  for line in "${seq[@]}" {
    echo -n $line
  }
}

main @ARGV
    (DONE benchmarks/compute/bubble_sort.sh)
#!/bin/sh
#
# POSIX shell script to compute fibonacci many times in a loop.  #
# Shells have similar speeds:
# dash: ~110 ms
# ash: ~290 ms -- the fork is slower!
# zsh: ~290 ms
# mksh: ~380 ms
# bash: ~430 ms
# yash: ~460 ms
#
# Note: all shells use 64 bit integers on 64 bit platforms!  But is that
# justified?  I want shell scripts to be portable!
#
# TODO: detect overflow in OSH.
#
# Note: fib(44) < 2^31, but fib(45) is greater
# Note: fib(544) < 2^63, but fib(545) is greater

setvar iters = ${1:-5}  # first argument of every benchmark should be the number of iterations

setvar n = ${2:-10}  # fib(n)

setvar i = '0'
while test $i -lt $iters {
  setvar j = '0'

  setvar a = '1', b = '1'

  while test $j -lt $n {
    # a, b = b, a+b
    setvar tmp = "$b"
    setvar b = $((a+b))
    setvar a = "$tmp"

    setvar j = $((j+1))
  }

  echo $b

  setvar i = $((i+1))
}
    (DONE benchmarks/compute/fib.sh)
#!/usr/bin/env bash
#
# Usage:
#   benchmarks/compute/palindrome.sh unicode
#   benchmarks/compute/palindrome.sh bytes

#shopt -s globasciiranges

proc main {
  if test $1 = "unicode" {
    # Redirects don't work yet in Oil!
    #echo "palindrome.sh: unicode" 1>&2
    true
  } else {
    setvar LANG = 'C'
  }

  while read -r line {
    local n=${#line}

    if test $n -eq 0 {  # skip blank lines
      continue
    }

    setvar h = $((n / 2))  # floor division
    local palindrome=T
    for (( i = 0; i < h; ++i )); do
      #echo ${line:i:1} ${line:n-1-i:1}
      if test "${line:i:1}" != "${line:n-1-i:1}"; then
        palindrome=''
      fi
    done

    if test -n $palindrome {
      printf '%s\n' $line
    }
  }
}

main @ARGV
    (DONE benchmarks/compute/palindrome.sh)
#!/bin/sh
echo hello
    (DONE benchmarks/compute/hello.sh)
#!/usr/bin/env bash
#
# Show superlinear behavior in bash arrays.  Need pretty high N to start seeing
# it.
#
# Usage:
#   ./array_ref.sh MODE

set -o nounset
set -o pipefail
set -o errexit

proc main {
  local mode=$1

  mapfile -t array

  local n=${#array[@]}
  local sum=0

  case (mode) {
    linear {
      for (( i = 0; i < n; ++i )); do
        sum=$((sum + array[i]))
      done
      }

    random {
      for (( i = 0; i < n; ++i )); do
        # Super linear
        sum=$((sum + array[array[i]]))
      done
      }
  }

  echo sum=$sum

  # This doesn't seem to defeat LASTREF?
  #array+=('X')
  #unset 'array[-1]'

  # neither does this
  #array[i]=$i
}

main @ARGV
    (DONE benchmarks/compute/array_ref.sh)
#!/usr/bin/env bash

set -o noglob  # for unquoted $text splitting

proc tokenize {
  # read it once
  read -r -d '' text

  for word in $text {  # relies on word splitting
    echo $word
  }
}

proc main {
  setvar iters = ${1:-100}

  # read it once
  read -r -d '' text

  declare -A words

  # do it a bunch of times
  for (( i = 0; i < iters; ++i )); do

    # Relies on unquoted IFS splitting.  Difference with Python: Python will
    # give you \, but IFS splitting won't.
    for word in $text; do

      # Hm this isn't correct in bash!
      old=${words["$word"]}
      words["$word"]=$((old + 1))

      # BUG in bash, see spec/assoc case #37
      #(( words["$word"] += 1 ))
      #(( words[\$word] += 1 ))
    done
  done

  # note: we can sort the output in the benchmark and assert that it's the same?

  for word in "${!words[@]}" {
    echo "${words["$word"]} $word"
  }
}

main @ARGV
#tokenize "$@"
    (DONE benchmarks/compute/word_freq.sh)
#!/usr/bin/env bash
#
# Usage:
#   ./startup.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

readonly TIMEFORMAT='%R'

# 3 groups:
# dash/mksh/lua/awk: < 90 syscalls
# bash/zsh/perl: 145-289 syscalls
# python -S/python3 -S/ruby: 200-800 syscalls

# This throws off absolute timing, but relative still makes sense.
# TODO: get rid of wc -l if not.

# Ubuntu inside Virtualbox on Macbook Air:
#
# dash/mksh/mawk: 1 ms
# bash/gawk/perl: 2 ms
# zsh: 3 ms
# python -S: 5 ms
# python3 -S : 13 ms
# python import: 16 ms
# app.zip / hello.ovm: 10 ms
# oil true: 46 ms
# oil echo hi: 59 ms

proc strace-callback {
  ### Count the syscalls made by argv (strace lines piped to wc).
  strace @ARGV 2>&1 | wc -l
}

proc time-callback {
  ### Time argv, discarding its stdout.
  time @ARGV >/dev/null
}

proc compare {
  local callback=${1:-strace-callback}

  # dash is the fastest: 0 ms.
  for sh in bash dash mksh zsh {
    echo $sh
    $callback $sh -c 'echo "hi" > /dev/null'
    echo
  }

  # gawk is slower than mawk
  for awk in gawk mawk {
    echo $awk
    $callback $awk '{ print "hi" } ' < /dev/null
    echo
  }

  if which lua {
    echo lua
    $callback lua -e 'print "hi"'
    echo
  }

  echo perl
  $callback perl -e 'print "hi\n"'
  echo

  # Woah 247 ms?  Ruby is slower than Python.
  if which ruby {
    echo ruby
    $callback ruby -e 'print "hi\n"'
    echo
  }

  # Oh almost all stats come from -S!
  for py in python python3 {
    echo $py -S
    $callback $py -S -c 'print("hi")'
    echo
  }

  for py in python python3 {
    echo $py import
    $callback $py -S -c 'import json;print("hi")'
    echo
  }

  for py in python python3 {
    echo $py import
    $callback $py -S -c 'import json;print("hi")'
    echo
  }

  echo 'Small app.zip'
  $callback python -S _tmp/app.zip
  echo

  # This is close to app.zip, a few milliseconds slower.
  echo 'hello app bundle'
  $callback _bin/hello.ovm || true
  echo

  echo 'OSH app bundle true'
  $callback _bin/true
  echo

  echo 'OSH app bundle Hello World'
  $callback _bin/osh -c 'echo hi'
  echo
}

proc compare-strace {
  compare strace-callback
}

proc compare-time {
  compare time-callback
}

proc import-stats {
  # 152 sys calls!  More than bash needs to start up.
  echo json
  strace python -c 'import json' 2>&1 | grep json | wc -l

  echo nonexistent___
  strace python -c 'import nonexistent___' 2>&1 | grep nonexistent___ | wc -l
}

proc make-zip {
  rm -r -f _tmp/app
  rm -f _tmp/app.zip

  mkdir -p _tmp/app

  cat > _tmp/app/lib1.py <<< """
print "hi from lib1"
"""

  cat > _tmp/app/lib2.py <<< """
print "hi from lib2"
"""

  cat > _tmp/app/__main__.py <<< """
import sys
sys.path = [sys.argv[0]]
import lib1
import lib2
print "hi from zip"
"""

  pushd _tmp/app
  zip -r ../app.zip .
  popd
}

# Can get this down to 5 ms, 593 syscalls.  Needs to be much less.
proc test-zip {
  python -S _tmp/app.zip
}

# This still tries to import encodings and stuff like that.
proc strace-zip {
  strace python -S _tmp/app.zip
}

@ARGV
    (DONE benchmarks/startup.sh)
#!/usr/bin/env bash
#
# Test scripts found in the wild for both correctness and performance.
#
# Usage:
#   benchmarks/osh-runtime.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)

source benchmarks/common.sh  # tsv-concat
source benchmarks/id.sh  # print-job-id
source soil/common.sh  # find-dir-html
source test/common.sh
source test/tsv-lib.sh  # tsv-row

readonly BASE_DIR=_tmp/osh-runtime

# TODO: Move to ../oil_DEPS
readonly TAR_DIR=$PWD/_deps/osh-runtime  # Make it absolute

#
# Dependencies
#

readonly PY27_DIR=$PWD/Python-2.7.13

# NOTE: Same list in oilshell.org/blob/run.sh.
proc tarballs {
  cat <<< """
tcc-0.9.26.tar.bz2
yash-2.46.tar.xz
ocaml-4.06.0.tar.xz
"""
}

proc download {
  mkdir -p $TAR_DIR
  tarballs | xargs -n 1 -I {} --verbose -- \
    wget --no-clobber --directory $TAR_DIR 'https://www.oilshell.org/blob/testdata/{}'
}

proc extract {
  set -x
  time for f in $TAR_DIR/*.{bz2,xz} {
    tar -x --directory $TAR_DIR --file $f 
  }
  set +x

  ls -l $TAR_DIR
}

#
# Computation
#

proc run-tasks {
  ### Read task rows (host, shell, workload) from stdin; time each run and
  ### collect per-task stdout and GC stats under $raw_out_dir.
  local raw_out_dir=$1
  setvar raw_out_dir = ""$PWD/$raw_out_dir""  # because we change dirs

  local task_id=0
  while read -r host_name sh_path workload {

    log "*** $host_name $sh_path $workload $task_id"

    # Make the shell path usable after we pushd into a working dir
    local sh_run_path
    case (sh_path) {
      /* {  # Already absolute
        setvar sh_run_path = "$sh_path"
        }
      */* {  # It's relative, so make it absolute
        setvar sh_run_path = "$PWD/$sh_path"
        }
      * {  # 'dash' should remain 'dash'
        setvar sh_run_path = "$sh_path"
        }
    }

    local working_dir=''
    local files_out_dir="$raw_out_dir/files-$task_id"
    mkdir -v -p $files_out_dir

    local save_new_files=''

    # Map the workload name to an argv and (optionally) a working dir
    local -a argv
    case (workload) {
      hello-world {
        setvar argv = ''( testdata/osh-runtime/hello_world.sh )
        }

      abuild-print-help {
        setvar argv = ''( testdata/osh-runtime/abuild -h )
        }

      configure.cpython {
        setvar argv = ''( $PY27_DIR/configure )
        setvar working_dir = "$files_out_dir"
        }

      configure.* {
        setvar argv = ''( ./configure )

        local conf_dir
        case (workload) {
          *.ocaml {
            setvar conf_dir = ''ocaml-4.06.0''
            }
          *.tcc {
            setvar conf_dir = ''tcc-0.9.26''
            }
          *.yash {
            setvar conf_dir = ''yash-2.46''
            }
          * {
            die "Invalid workload $workload"
            }
        }

        setvar working_dir = "$TAR_DIR/$conf_dir"
        }

      * {
        die "Invalid workload $workload"
        }
    }

    # Build the time-tsv invocation once; it's reused in both branches below
    local -a time_argv=(
      time-tsv
        --output "$raw_out_dir/times.tsv" --append
        --rusage
        --field "$task_id"
        --field "$host_name" --field "$sh_path"
        --field "$workload"
        -- "$sh_run_path" "${argv[@]}"
    )

    local stdout_file="$files_out_dir/STDOUT.txt"
    local gc_stats_file="$raw_out_dir/gc-$task_id.txt"

    # Maybe change dirs
    if test -n $working_dir {
      pushd $working_dir
    }

    if test -n $save_new_files {
      touch __TIMESTAMP
    }

    # Run it, possibly with GC stats.  (Fixed translation artifact: stray
    # quotes broke the osh case arm and the OILS_GC_STATS_FD binding.)
    case (sh_path) {
      *_bin/*/osh {
        OILS_GC_STATS_FD=99 ${time_argv[@]} > $stdout_file 99> $gc_stats_file
        }
      * {
        ${time_argv[@]} > $stdout_file
        }
    }

    if test -n $save_new_files {
      echo "COPYING to $files_out_dir"
      find . -type f -newer __TIMESTAMP \
        | xargs -I {} -- cp --verbose {} $files_out_dir
    }

    # Restore dir
    if test -n $working_dir {
      popd
    }

    setvar task_id = $((task_id + 1))
  }
}

proc print-tasks {
  ### Emit one TSV row per (shell, workload) pair for run-tasks to consume.
  local host_name=$1  
  local osh_native=$2

  local -a workloads=(
    hello-world
    abuild-print-help

    configure.cpython
    configure.ocaml
    configure.tcc
    configure.yash
  )

  # QUICKLY=1 trims the matrix for fast iteration
  if test -n ${QUICKLY:-} {
    # Just do the first two
    setvar workloads = ''(
      hello-world
      abuild-print-help
    )
  }

  for sh_path in bash dash bin/osh $osh_native {
    for workload in "${workloads[@]}" {
      tsv-row $host_name $sh_path $workload
    }
  }
}

proc measure {
  local host_name=$1  # 'no-host' or 'lenny'
  local raw_out_dir=$2
  local osh_native=$3  # $OSH_CPP_NINJA_BUILD or $OSH_CPP_BENCHMARK_DATA
  local out_dir=${4:-$BASE_DIR}  # ../benchmark-data/osh-runtime or _tmp/osh-runtime

  mkdir -v -p $raw_out_dir

  local tsv_out="$raw_out_dir/times.tsv"

  # Write header of the TSV file that is appended to.
  time-tsv -o $tsv_out --print-header \
    --rusage \
    --field task_id \
    --field host_name --field sh_path \
    --field workload

  # run-tasks outputs 3 things: raw times.tsv, per-task STDOUT and files, and
  # per-task GC stats
  print-tasks $host_name $osh_native | run-tasks $raw_out_dir

  # Turn individual files into a TSV, adding host
  benchmarks/gc_stats_to_tsv.py $raw_out_dir/gc-*.txt \
    | tsv-add-const-column host_name $host_name \
    > $raw_out_dir/gc_stats.tsv

  cp -v _tmp/provenance.tsv $raw_out_dir
}

proc stage1 {
  local base_dir=${1:-$BASE_DIR}  # _tmp/osh-runtime or ../benchmark-data/osh-runtime
  local single_machine=${2:-}

  local out_dir=$BASE_DIR/stage1  # _tmp/osh-runtime
  mkdir -p $out_dir

  # Globs are in lexicographical order, which works for our dates.

  local -a raw_times=()
  local -a raw_gc_stats=()
  local -a raw_provenance=()

  if test -n $single_machine {
    local -a a=( $base_dir/raw.$single_machine.* )

    setvar raw_times = ''( ${a[-1]}/times.tsv )
    setvar raw_gc_stats = ''( ${a[-1]}/gc_stats.tsv )
    setvar raw_provenance = ''( ${a[-1]}/provenance.tsv )

  } else {
    local -a a=( $base_dir/raw.$MACHINE1.* )
    local -a b=( $base_dir/raw.$MACHINE2.* )

    setvar raw_times = ''( ${a[-1]}/times.tsv ${b[-1]}/times.tsv )
    setvar raw_gc_stats = ''( ${a[-1]}/gc_stats.tsv ${b[-1]}/gc_stats.tsv )
    setvar raw_provenance = ''( ${a[-1]}/provenance.tsv ${b[-1]}/provenance.tsv )
  }

  tsv-concat ${raw_times[@]} > $out_dir/times.tsv

  tsv-concat ${raw_gc_stats[@]} > $out_dir/gc_stats.tsv

  tsv-concat ${raw_provenance[@]} > $out_dir/provenance.tsv
}

proc print-report {
  local in_dir=$1

  benchmark-html-head 'OSH Runtime Performance'

  cat <<< """
  <body class="width60">
    <p id="home-link">
      <a href="/">oilshell.org</a>
    </p>
"""

  cmark <<< '''
## OSH Runtime Performance

Source code: [oil/benchmarks/osh-runtime.sh](https://github.com/oilshell/oil/tree/master/benchmarks/osh-runtime.sh)

### Elapsed Time by Shell (milliseconds)

Some benchmarks call many external tools, while some exercise the shell
interpreter itself.  Parse time is included.

Memory usage is measured in MB (powers of 10), not MiB (powers of 2).
'''
  tsv2html $in_dir/elapsed.tsv

  cmark <<< """
### Memory Usage (Max Resident Set Size in MB)
"""
  tsv2html $in_dir/max_rss.tsv

  cmark <<< """
### GC Stats
"""
  tsv2html $in_dir/gc_stats.tsv

  cmark <<< """
### Details of All Tasks
"""
  tsv2html $in_dir/details.tsv


  cmark <<< '''

### Shell and Host Details
'''
  tsv2html $in_dir/shells.tsv
  tsv2html $in_dir/hosts.tsv

  # Only show files.html link on a single machine
  if test -f $(dirname $in_dir)/files.html {
    cmark <<< '''
---

[raw files](files.html)
'''
  }

  cat <<< """
  </body>
</html>
"""
}

proc soil-run {
  ### Run it on just this machine, and make a report

  rm -r -f $BASE_DIR
  mkdir -p $BASE_DIR

  # TODO: This testdata should be baked into Docker image, or mounted
  download
  extract

  # could add _bin/cxx-bumpleak/oils-for-unix, although sometimes it's slower
  local -a oil_bin=( $OSH_CPP_NINJA_BUILD )
  ninja ${oil_bin[@]}

  local single_machine='no-host'

  local job_id
  setvar job_id = $(print-job-id)

  # Write _tmp/provenance.* and _tmp/{host,shell}-id
  shell-provenance-2 \
    $single_machine $job_id _tmp \
    bash dash bin/osh ${oil_bin[@]}

  local host_job_id="$single_machine.$job_id"
  local raw_out_dir="$BASE_DIR/raw.$host_job_id"
  mkdir -p $raw_out_dir $BASE_DIR/stage1

  measure $single_machine $raw_out_dir $OSH_CPP_NINJA_BUILD

  # Trivial concatenation for 1 machine
  stage1 '' $single_machine

  benchmarks/report.sh stage2 $BASE_DIR

  # Make _tmp/osh-parser/files.html, so index.html can potentially link to it
  find-dir-html _tmp/osh-runtime files

  benchmarks/report.sh stage3 $BASE_DIR
}

#
# Debugging
#

proc compare-cpython {
  local -a a=( ../benchmark-data/osh-runtime/*.broome.2023* )
  #local -a b=( ../benchmark-data/osh-runtime/*.lenny.2023* )

  local dir=${a[-1]}

  echo $dir

  head -n 1 $dir/times.tsv
  fgrep 'configure.cpython' $dir/times.tsv

  local bash_id=2
  local dash_id=8
  local osh_py_id=14
  local osh_cpp_id=20

  set +o errexit

  echo 'bash vs. dash'
  diff -u --recursive $dir/{files-2,files-8} | diffstat
  echo

  echo 'bash vs. osh-py'
  diff -u --recursive $dir/{files-2,files-14} | diffstat
  echo

  echo 'bash vs. osh-cpp'
  diff -u --recursive $dir/{files-2,files-20} | diffstat
  echo

  diff -u $dir/{files-2,files-20}/STDOUT.txt
  echo

  diff -u $dir/{files-2,files-20}/pyconfig.h
  echo

  cdiff -u $dir/{files-2,files-20}/config.log
  echo
}

@ARGV
    (DONE benchmarks/osh-runtime.sh)
#!/usr/bin/env bash
#
# Measure how fast the OSH parser is.
#
# Usage:
#   benchmarks/osh-parser.sh <function name>
#
# Examples:
#   benchmarks/osh-parser.sh soil-run
#   QUICKLY=1 benchmarks/osh-parser.sh soil-run

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd "$(dirname $0)/.."; pwd)  # tsv-lib.sh uses this
readonly REPO_ROOT

source benchmarks/common.sh  # die
source benchmarks/cachegrind.sh  # with-cachgrind
source soil/common.sh  # find-dir-html
source test/tsv-lib.sh  # tsv2html
source test/common.sh  # die

# TODO: The raw files should be published.  In both
# ~/git/oilshell/benchmarks-data and also in the /release/ hierarchy?
readonly BASE_DIR=_tmp/osh-parser
readonly SORTED=$BASE_DIR/tmp/sorted.txt

proc write-sorted-manifest {
  ### Write $SORTED (paths by line count) and a num_lines,path CSV/TSV

  local files=${1:-benchmarks/osh-parser-files.txt}
  local counts=$BASE_DIR/tmp/line-counts.txt
  local csv_out=$2
  local sep=${3:-','}  # CSV or TSV

  # Remove comments and sort by line count
  grep -v '^#' $files | xargs wc -l | sort -n > $counts

  # Raw list of paths ('total' summary row from wc is dropped)
  cat $counts | awk '$2 != "total" { print $2 }' > $SORTED

  # Make a CSV file from wc output
  cat $counts | awk -v sep="$sep" '
      BEGIN { print "num_lines" sep "path" }
      $2 != "total" { print $1 sep $2 }' \
      > $csv_out
}

# Called by xargs with a task row.
proc parser-task {
  ### Time '$sh_path -n $script_path' and append a row to the times CSV

  local out_dir=$1  # output
  local job_id=$2
  local host=$3
  local host_hash=$4
  local sh_path=$5
  local shell_hash=$6
  local script_path=$7

  echo "--- TIME $sh_path $script_path ---"

  local times_out="$out_dir/$host.$job_id.times.csv"

  local shell_name
  setvar shell_name = $(basename $sh_path)

  # Can't use array because of set -u bug!!!  Only fixed in bash 4.4.
  setvar extra_args = ''''
  case (shell_name) {
    osh|oils-for-unix.* {
      # Suppress AST printing; we only want parse time
      setvar extra_args = ''--ast-format none''
      }
  }

  # exit code, time in seconds, host_hash, shell_hash, path.  \0
  # would have been nice here!
  # TODO: TSV
  benchmarks/time_.py \
    --append \
    --output $times_out \
    --rusage \
    --field $host --field $host_hash \
    --field $shell_name --field $shell_hash \
    --field $script_path -- \
    $sh_path -n $extra_args $script_path || echo FAILED
}

# Called by xargs with a task row.
# NOTE: This is very similar to parser-task above, except that we add
# cachegrind.  We could probably consolidate these.
proc cachegrind-task {
  ### Run '$sh_path -n $script_path' under cachegrind; append a TSV row

  local out_dir=$1  # output
  local job_id=$2
  local host_name=$3
  local unused2=$4
  local sh_path=$5
  local shell_hash=$6
  local script_path=$7

  echo "--- CACHEGRIND $sh_path $script_path ---"

  local host_job_id="$host_name.$job_id"

  # NOTE: This has to match the path that the header was written to
  local times_out="$out_dir/$host_job_id.cachegrind.tsv"

  local cachegrind_out_dir="$host_job_id.cachegrind"
  mkdir -p $out_dir/$cachegrind_out_dir

  local shell_name
  setvar shell_name = $(basename $sh_path)

  local script_name
  setvar script_name = $(basename $script_path)

  # RELATIVE PATH
  local cachegrind_out_path="${cachegrind_out_dir}/${shell_name}-${shell_hash}__${script_name}.txt"

  # Can't use array because of set -u bug!!!  Only fixed in bash 4.4.
  setvar extra_args = ''''
  case (shell_name) {
    osh|oils-for-unix.* {
      # Quoting made consistent with parser-task; the string has no
      # interpolation, so the value is unchanged
      setvar extra_args = ''--ast-format none''
      }
  }

  benchmarks/time_.py \
    --tsv \
    --append \
    --output $times_out \
    --rusage \
    --field $shell_name --field $shell_hash \
    --field $script_path \
    --field $cachegrind_out_path \
    -- \
    $0 with-cachegrind $out_dir/$cachegrind_out_path \
    $sh_path -n $extra_args $script_path || echo FAILED
}

# For each shell, print 10 script paths.
proc print-tasks {
  ### Emit one task row per (provenance row, file); rest of argv is shells

  local provenance=$1
  shift
  # rest are shells

  # Add 1 field for each of 5 fields.
  cat $provenance | filter-provenance @ARGV |
  while read fields {
    if test -n ${QUICKLY:-} {
      # Quick test: only the 2 smallest files
      head -n 2 $SORTED | xargs -n 1 -- echo $fields
    } else {
      cat $SORTED | xargs -n 1 -- echo $fields
    }
  }
}

proc cachegrind-parse-configure-coreutils {
  ### Similar to benchmarks/gc, benchmarks/uftrace

  local bin=_bin/cxx-opt/oils-for-unix
  ninja $bin
  local out=_tmp/parse.configure-coreutils.txt

  # Parse-only invocation on the largest test file
  local -a cmd=( 
    $bin --ast-format none -n
    benchmarks/testdata/configure-coreutils )

  # Once normally, once under cachegrind
  time ${cmd[@]}

  time cachegrind $out ${cmd[@]}

  echo
  cat $out
}

proc cachegrind-demo {
  ### Run valgrind/cachegrind directly on a trivial command, to show output

  #local sh=bash
  local sh=zsh

  local out_dir=_tmp/cachegrind

  mkdir -p $out_dir

  # notes:
  # - not passing --trace-children (follow execvpe)
  # - passing --xml=yes gives error: cachegrind doesn't support XML
  # - there is a log out and a details out

  valgrind --tool=cachegrind \
    --log-file=$out_dir/log.txt \
    --cachegrind-out-file=$out_dir/details.txt \
    -- $sh -c 'echo hi'

  echo
  head -n 20 $out_dir/*.txt
}

readonly NUM_TASK_COLS=6  # input columns: 5 from provenance, 1 for file

# Figure out all tasks to run, and run them.  When called from auto.sh, $2
# should be the ../benchmarks-data repo.
proc measure {
  ### Write the CSV header, generate tasks, then run parser-task on each

  local provenance=$1
  local host_job_id=$2
  local out_dir=${3:-$BASE_DIR/raw}
  local osh_cpp=${4:-$OSH_CPP_BENCHMARK_DATA}

  local times_out="$out_dir/$host_job_id.times.csv"
  local lines_out="$out_dir/$host_job_id.lines.csv"

  mkdir -p $BASE_DIR/{tmp,raw,stage1} $out_dir

  # Files that we should measure.  Exploded into tasks.
  write-sorted-manifest '' $lines_out

  # Write Header of the CSV file that is appended to.
  # TODO: TSV
  benchmarks/time_.py --print-header \
    --rusage \
    --field host_name --field host_hash \
    --field shell_name --field shell_hash \
    --field path \
    > $times_out

  local tasks=$BASE_DIR/tasks.txt
  print-tasks $provenance ${SHELLS[@]} $osh_cpp > $tasks

  # Run them all
  cat $tasks | xargs -n $NUM_TASK_COLS -- $0 parser-task $out_dir
}

proc measure-cachegrind {
  ### Like 'measure', but runs each parse under cachegrind (TSV output)

  local provenance=$1
  local host_job_id=$2
  local out_dir=${3:-$BASE_DIR/raw}
  local osh_cpp=${4:-$OSH_CPP_BENCHMARK_DATA}

  local cachegrind_tsv="$out_dir/$host_job_id.cachegrind.tsv"
  local lines_out="$out_dir/$host_job_id.lines.tsv"

  mkdir -p $BASE_DIR/{tmp,raw,stage1} $out_dir

  write-sorted-manifest '' $lines_out $'\t'  # TSV

  # TODO: This header is fragile.  Every task should print its own file with a
  # header, and then we can run them in parallel, and join them with
  # devtools/csv_concat.py

  benchmarks/time_.py --tsv --print-header \
    --rusage \
    --field shell_name --field shell_hash \
    --field path \
    --field cachegrind_out_path \
    > $cachegrind_tsv

  local ctasks=$BASE_DIR/cachegrind-tasks.txt

  # zsh weirdly forks during zsh -n, which complicates our cachegrind
  # measurement.  So just ignore it.  (This can be seen with
  # strace -e fork -f -- zsh -n $file)
  print-tasks $provenance bash dash mksh $osh_cpp > $ctasks

  cat $ctasks | xargs -n $NUM_TASK_COLS -- $0 cachegrind-task $out_dir
}

#
# Data Preparation and Analysis
#

proc stage1-cachegrind {
  ### Scrape instruction counts out of raw cachegrind output into one TSV

  local raw_dir=$1
  local single_machine=$2
  local out_dir=$3
  local raw_data_csv=$4

  local maybe_host
  if test -n $single_machine {
    # CI: _tmp/osh-parser/raw.no-host.$job_id
    setvar maybe_host = ''no-host''
  } else {
    # release: ../benchmark-data/osh-parser/raw.lenny.$job_id
    #maybe_host=$(hostname)
    setvar maybe_host = "$MACHINE1"  # lenny
  }

  # Only runs on one machine
  local -a sorted=( $raw_dir/$maybe_host.*.cachegrind.tsv )
  local tsv_in=${sorted[-1]}  # latest one

  # Add an 'irefs' column by extracting 'I refs' from each referenced
  # cachegrind output file
  devtools/tsv_column_from_files.py \
    --new-column irefs \
    --path-column cachegrind_out_path \
    --extract-group-1 'I[ ]*refs:[ ]*([\d,]+)' \
    --remove-commas \
    $tsv_in > $out_dir/cachegrind.tsv

  echo $tsv_in >> $raw_data_csv
}

proc stage1 {
  ### Gather the latest raw CSVs (from 1 or 2 machines) into $BASE_DIR/stage1

  local raw_dir=${1:-$BASE_DIR/raw}
  local single_machine=${2:-}

  local out=$BASE_DIR/stage1
  mkdir -p $out

  # Construct a one-column CSV file
  local raw_data_csv=$out/raw-data.csv
  echo 'path' > $raw_data_csv

  stage1-cachegrind $raw_dir $single_machine $out $raw_data_csv

  local lines_csv=$out/lines.csv

  local -a raw=()
  if test -n $single_machine {
    local -a a=($raw_dir/$single_machine.*.times.csv)
    setvar raw = ''( ${a[-1]} )
    echo ${a[-1]} >> $raw_data_csv

    # They are the same, output one of them.
    cat $raw_dir/$single_machine.*.lines.csv > $lines_csv 
  } else {
    # Globs are in lexicographical order, which works for our dates.
    local -a a=($raw_dir/$MACHINE1.*.times.csv)
    local -a b=($raw_dir/$MACHINE2.*.times.csv)

    setvar raw = ''( ${a[-1]} ${b[-1]} )
    do {
      echo ${a[-1]}
      echo ${b[-1]}
    } >> $raw_data_csv


    # Verify that the files are equal, and pass one of them.
    local -a c=($raw_dir/$MACHINE1.*.lines.csv)
    local -a d=($raw_dir/$MACHINE2.*.lines.csv)

    local left=${c[-1]}
    local right=${d[-1]}

    # die if the two machines measured different file manifests
    if ! diff $left $right {
      die "Benchmarks were run on different files ($left != $right)"
    }

    # They are the same, output one of them.
    cat $left > $lines_csv 
  }

  local times_csv=$out/times.csv
  csv-concat ${raw[@]} > $times_csv

  head $out/*
  wc -l $out/*
}

# TODO:
# - maybe rowspan for hosts: flanders/lenny
#   - does that interfere with sorting?
#
# NOTE: not bothering to make it sortable now.  Just using the CSS.

proc print-report {
  ### Print the HTML report body for the osh-parser benchmark to stdout

  local in_dir=$1

  benchmark-html-head 'OSH Parser Performance'

  cat <<< """
  <body class="width60">
    <p id="home-link">
      <a href="/">oilshell.org</a>
    </p>
"""

  cmark <<< '''
## OSH Parser Performance

We time `$sh -n $file` for various files under various shells, and repeat then
run under cachegrind for stable metrics.

Source code: [oil/benchmarks/osh-parser.sh](https://github.com/oilshell/oil/tree/master/benchmarks/osh-parser.sh)

### Summary

#### Instructions Per Line (via cachegrind)

Lower numbers are generally better, but each shell recognizes a different
language, and Oil uses a more thorough parsing algorithm.  In **thousands** of
"I refs".

'''
  tsv2html $in_dir/cachegrind_summary.tsv

  cmark <<< '''

(zsh isn't measured because `zsh -n` unexpectedly forks.)

#### Average Parsing Rate, Measured on Two Machines (lines/ms)

Shell startup time is included in the elapsed time measurements, but long files
are chosen to minimize its effect.
'''
  csv2html $in_dir/summary.csv

  cmark <<< '### Per-File Measurements'
  echo

  # Flat tables for CI
  if test -f $in_dir/times_flat.tsv {
    cmark <<< '#### Time and Memory'
    echo

    tsv2html $in_dir/times_flat.tsv
  }
  if test -f $in_dir/cachegrind_flat.tsv {
    cmark <<< '#### Instruction Counts'
    echo

    tsv2html $in_dir/cachegrind_flat.tsv
  }

  # Breakdowns for release
  if test -f $in_dir/instructions.tsv {
    cmark <<< '#### Instructions Per Line (in thousands)'
    echo
    tsv2html $in_dir/instructions.tsv
  }

  if test -f $in_dir/elapsed.csv {
    cmark <<< '#### Elapsed Time (milliseconds)'
    echo
    csv2html $in_dir/elapsed.csv
  }

  if test -f $in_dir/rate.csv {
    cmark <<< '#### Parsing Rate (lines/ms)'
    echo
    csv2html $in_dir/rate.csv
  }

  if test -f $in_dir/max_rss.csv {
    cmark <<< '''
### Memory Usage (Max Resident Set Size in MB)

Again, Oil uses a **different algorithm** (and language) than POSIX shells.  It
builds an AST in memory rather than just validating the code line-by-line.

'''
    csv2html $in_dir/max_rss.csv
  }

  cmark <<< """
### Shell and Host Details
"""
  csv2html $in_dir/shells.csv
  csv2html $in_dir/hosts.csv

  cmark <<< """
### Raw Data
"""
  csv2html $in_dir/raw-data.csv

  # Only show files.html link on a single machine
  if test -f $(dirname $in_dir)/files.html {
    cmark <<< '''
---
[raw files](files.html)

'''
  }

  cat <<< """
  </body>
</html>
"""
}

proc soil-run {
  ### Run it on just this machine, and make a report

  # Start from a clean output tree
  rm -r -f $BASE_DIR
  mkdir -p $BASE_DIR

  local -a oil_bin=( $OSH_CPP_NINJA_BUILD )
  ninja ${oil_bin[@]}

  # Placeholder host name used when running on a single machine (CI)
  local single_machine='no-host'

  local job_id
  setvar job_id = $(benchmarks/id.sh print-job-id)

  # Write _tmp/provenance.* for the shells we measure
  benchmarks/id.sh shell-provenance-2 \
    $single_machine $job_id _tmp \
    bash dash bin/osh ${oil_bin[@]}

  # TODO: measure* should use print-tasks | run-tasks
  local provenance=_tmp/provenance.txt
  local host_job_id="$single_machine.$job_id"

  measure $provenance $host_job_id '' $OSH_CPP_NINJA_BUILD

  measure-cachegrind $provenance $host_job_id '' $OSH_CPP_NINJA_BUILD

  # TODO: R can use this TSV file
  cp -v _tmp/provenance.tsv $BASE_DIR/stage1/provenance.tsv

  # Trivial concatenation for 1 machine
  stage1 '' $single_machine

  benchmarks/report.sh stage2 $BASE_DIR

  # Make _tmp/osh-parser/files.html, so index.html can potentially link to it
  find-dir-html _tmp/osh-parser files

  benchmarks/report.sh stage3 $BASE_DIR
}

@ARGV
    (DONE benchmarks/osh-parser.sh)
#!/usr/bin/env bash
#
# Usage:
#   benchamrks/builtin-io.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

readonly BIG=_tmp/1m_lines.txt

proc setup {
  ### Generate the 1M-line input file
  seq 1000000 > $BIG

}

# 25 ms
proc wc-big {
  ### Baseline: count lines with wc
  time wc -l $BIG
}

# bash takes 156 ms here!  Significantly slower than 'wc'.
# bin/osh in Python takes over 5 seconds!
#
# TODO:
# - Make sure bin/osh in C++ is reasonably fast.
# - Make sure a loop with read --line is reasonably fast.

proc mapfile-big {
  ### Time reading the big file with the mapfile builtin
  time mapfile < $BIG
  echo ${#MAPFILE[@]}  # verify length
}

# Hm this isn't that fast either, about 100 ms.
proc python-big {
  ### Time a line-counting loop in Python for comparison
  time python -S -c '
import sys
i = 0
for line in sys.stdin:
  i += 1
print(i)
' < $BIG
}

proc bash-syscall {
  # Shows that there are tons of read(0, 1) calls!
  seq 20 | strace -e read -- bash -c 'mapfile'
}

proc python-syscall {
  # Does read(0, 4096).  A saner way to read files
  seq 20 | strace -e read -- python -c '
import sys
for line in sys.stdin:
  print(line)
'
}


@ARGV
    (DONE benchmarks/builtin-io.sh)
#!/usr/bin/env bash
#
# Synthetic test with 1000 regexes.
#
# Usage:
#   benchmarks/regcomp-cache/run.sh <function name>
#
# Example:
#   benchmarks/regcomp-cache/run.sh match-many

set -o nounset
set -o pipefail
set -o errexit

proc match-many {
  ### Exercise the regcomp cache: try num_pat patterns on num_str strings

  local num_pat=${1:-300}
  local num_str=${2:-300}
  local repeat=${3:-1}

  # 1 2 3
  local repeat_str
  setvar repeat_str = $(seq $repeat)

  echo BASH_VERSION=${BASH_VERSION:-}
  echo OILS_VERSION=${OILS_VERSION:-}

  # One synthetic pattern per i, with a capture group
  declare -a REGEXES=()
  for i in $(seq $num_pat) {
    setvar REGEXES[i]="$i?($i*)$i+"  # last char is modified with ? then * and +
  }

  echo ${REGEXES[@]}

  local num_yes=0
  local num_tried=0

  for i in $(seq $num_str) {
    local str="$i$i$i"  # 3 copies
    for j in $(seq $num_pat) {
      local re="${REGEXES[j]}"
      for k in $repeat_str {  # potentially use the cache more
        if [[ $str =~ $re ]] {
          echo "string $str matches pattern $re - captured '${BASH_REMATCH[1]}'"
          setvar num_yes = $(( num_yes + 1 ))
        }
        setvar num_tried = $(( num_tried + 1 ))
      }
    }
  }

  # Summary counts; 'compare' diffs this output across shells
  echo
  echo "num_yes = $num_yes"
  echo "num_tried = $num_tried"
}

proc compare {
  ### Run match-many under bash and OSH; diff outputs and show timings

  # must do ./NINJA-config.sh first

  local bin=_bin/cxx-opt/osh
  ninja $bin

  local dir=_tmp/regcomp-cache
  mkdir -p $dir

  # with bash
  do { time $0 match-many @ARGV; } >$dir/bash-stdout.txt 2>$dir/bash-time.txt

  # with OSH
  do { time $bin $0 match-many @ARGV; } >$dir/osh-stdout.txt 2>$dir/osh-time.txt

  # should have equal output except for version
  diff $dir/*-stdout.txt || true

  # show timings
  head $dir/*-time.txt
}


@ARGV

    (DONE benchmarks/regcomp-cache/run.sh)
#!/usr/bin/env bash
#
# Compare operations on data structures, with little I/O: strings, array,
# associative arrays, integers.
#
# Usage:
#   benchmarks/compute.sh <function name>
#
# List of benchmarks:
#
# - fib: integer, loop, assignment (shells don't have real integers
# - word_freq: hash table / assoc array (OSH uses a vector<pair<>> now!)
#              also integer counter
# - bubble_sort: indexed array (bash uses a linked list?)
# - palindrome: string, slicing, unicode
# - parse_help: realistic shell-only string processing, which I didn't write.
#
# TODO:
# - vary problem size, which is different than iters
#   - bubble sort: array length, to test complexity of array indexing
#   - palindrome: longer lines, to test complexity of unicode/byte slicing
#   - word_freq: more unique words, to test complexity of assoc array
# - write awk versions of each benchmark (could be contributed)
# - assert that stdout is identical
# - create data frames and publish results
#   - leave holes for Python, other shells, etc.

set -o nounset
set -o pipefail
set -o errexit

setvar REPO_ROOT = $(cd $(dirname $0)/.. && pwd)
readonly REPO_ROOT

source benchmarks/common.sh  # filter-provenance
source test/tsv-lib.sh  # tsv2html

readonly BASE_DIR=_tmp/compute

# Stabilize 'sort' output across machines (ugh locales!)
export LC_ALL=C

# Make the 'time' builtin print only user CPU seconds
setvar TIMEFORMAT = ''%U''

# task_name,iter,args
proc hello-tasks {
  ### Task rows for the 'hello' (startup) benchmark
  local provenance=$1

  # Add 1 field for each of 5 fields.
  cat $provenance | filter-provenance python2 bash dash $OSH_CPP_REGEX |
  while read fields {
    echo 'hello _ _' | xargs -n 3 -- echo $fields
  }
}

# task_name,iter,args
proc fib-tasks {
  ### Task rows for the 'fib' (integer) benchmark
  local provenance=$1

  # Add 1 field for each of 5 fields.
  cat $provenance | filter-provenance python2 bash dash $OSH_CPP_REGEX |
  while read fields {
    echo 'fib 200 44' | xargs -n 3 -- echo $fields
  }
}

proc word_freq-tasks {
  ### Task rows for the 'word_freq' (assoc array) benchmark
  local provenance=$1

  cat $provenance | filter-provenance python2 bash $OSH_CPP_REGEX |
  while read fields {
    # BUG: oils-for-unix differs on these two.  Looks like it's related to
    # backslashes!
    #echo 'word_freq 10 benchmarks/testdata/abuild' | xargs -n 3 -- echo "$fields"
    #echo 'word_freq 2 benchmarks/testdata/ltmain.sh' | xargs -n 3 -- echo "$fields"
    echo 'word_freq 10 configure' | xargs -n 3 -- echo $fields
  }
}

proc assoc_array-tasks {
  ### Task rows for word_freq at several input sizes
  local provenance=$1

  cat $provenance | filter-provenance python2 bash $OSH_CPP_REGEX |
  while read fields {
    for n in 1000 2000 3000 {
      echo "word_freq 10 $n" | xargs -n 3 -- echo $fields
    }
  }
}

proc bubble_sort-tasks {
  # Note: this is quadratic, but bubble sort itself is quadratic!
  local provenance=$1

  cat $provenance | filter-provenance python2 bash $OSH_CPP_REGEX |
  while read fields {
    echo 'bubble_sort int   200' | xargs -n 3 -- echo $fields
    echo 'bubble_sort bytes 200' | xargs -n 3 -- echo $fields
  }
}

# Arrays are doubly linked lists in bash!  With a LASTREF hack to avoid being
# quadratic.  
#
# See array_reference() in array.c in bash.  It searches both back and
# forward.  Every cell has its index, a value, a forward pointer, and a back
# pointer.
#
# You need pretty high N to see the quadratic behavior though!

# NOTE: osh is also slower with linear access, but not superlinear!

proc array_ref-tasks {
  ### Task rows for array indexing; bash only (see notes above)
  local provenance=$1

  cat $provenance | filter-provenance bash |
  while read fields {
    for mode in seq random {
      for n in 10000 20000 30000 40000 {
        echo "array_ref $mode $n" | xargs -n 3 -- echo $fields
      }
    }
  }

#array_ref $OSH_CC  seq    5000
#array_ref $OSH_CC  seq    10000
#array_ref $OSH_CC  random 5000
#array_ref $OSH_CC  random 10000
#EOF
}

proc palindrome-tasks {
  ### Task rows for the 'palindrome' (string/unicode) benchmark
  local provenance=$1

  cat $provenance | filter-provenance python2 bash $OSH_CPP_REGEX |
  while read fields {
    echo 'palindrome unicode _' | xargs -n 3 -- echo $fields
    echo 'palindrome bytes   _' | xargs -n 3 -- echo $fields
  }
}

proc parse_help-tasks {
  ### Task rows for the 'parse_help' (real shell code) benchmark
  local provenance=$1

  cat $provenance | filter-provenance bash $OSH_CPP_REGEX |
  while read fields {
    echo 'parse_help ls-short _' | xargs -n 3 -- echo $fields
    echo 'parse_help ls       _' | xargs -n 3 -- echo $fields
    echo 'parse_help mypy     _' | xargs -n 3 -- echo $fields
  }
}

proc ext {
  ### Print the script extension for a runtime: 'py' for python2, 'sh' for shells.
  # Fix: bind the argument that every caller passes ($(ext $runtime)); the old
  # code ignored it and read 'runtime' from the caller's scope, which YSH
  # procs don't provide.  Also drop the unused 'local ext'.
  local runtime=$1
  case (runtime) { 
    (python2 {
      echo 'py'
      }
    (*sh | *osh* {
      echo 'sh'
      }
  }
}

proc word_freq-one {
  ### Run one word_freq task (hash tables)

  local name=${1:-word_freq}  # note: unused in this proc
  local runtime=$2

  local iters=${3:-10}
  local in=${4:-configure}  # input

  $runtime benchmarks/compute/word_freq.$(ext $runtime) $iters < $in | sort -n
}

proc assoc_array-one {
  ### Run word_freq with seq

  local name=${1:-word_freq}  # note: unused in this proc
  local runtime=$2

  local iters=${3:-10}
  local n=${4:-10} 

  # shuf so we don't get the bash optimization
  seq $n | shuf |
  $runtime benchmarks/compute/word_freq.$(ext $runtime) $iters | sort -n
}

proc bubble_sort-one {
  ### Run one bubble_sort task (arrays)

  local name=${1:-bubble_sort}  # selects the testdata dir below
  local runtime=$2
  local mode=${3:-int}
  local n=${4:-100}

  $runtime benchmarks/compute/bubble_sort.$(ext $runtime) $mode \
     < $BASE_DIR/tmp/$name/testdata-$n.txt
}

# OSH is like 10x faster here!
proc array_ref-one {
  ### Run one array_ref task (arrays)

  local name=${1:-bubble_sort}  # note: unused in this proc
  local runtime=$2
  local mode=${3:-seq}
  local n=${4:-100}

  seq $n | shuf | $runtime benchmarks/compute/array_ref.$(ext $runtime) $mode
}

proc palindrome-one {
  ### Run one palindrome task (strings)

  local name=${1:-palindrome}  # selects the testdata dir below
  local runtime=$2
  local mode=${3:-unicode}

  $runtime benchmarks/compute/palindrome.$(ext $runtime) $mode \
    < $BASE_DIR/tmp/$name/testdata.txt
}

proc parse_help-one {
  ### Run one palindrome task (strings, real code)

  local name=${1:-parse_help}  # note: unused in this proc
  local runtime=$2
  local workload=${3:-}

  $runtime benchmarks/parse-help/pure-excerpt.sh _parse_help - \
    < benchmarks/parse-help/$workload.txt
}

#
# Helpers
#

# Each wrapper forwards (provenance, host_job_id, out_dir) to task-all for one
# benchmark name.

proc hello-all { task-all hello @ARGV; }
proc fib-all { task-all fib @ARGV; }
proc word_freq-all { task-all word_freq @ARGV; }
proc assoc_array-all { task-all assoc_array @ARGV; }

# TODO: Fix the OSH comparison operator!  It gives the wrong answer and
# completes quickly.
proc bubble_sort-all { task-all bubble_sort @ARGV; }

# Array that is not quadratic
proc array_ref-all { task-all array_ref @ARGV; }

# Hm osh is a little slower here
proc palindrome-all { task-all palindrome @ARGV; }

proc parse_help-all { task-all parse_help @ARGV; }

proc task-all {
  ### Run every task row of one benchmark; write a times TSV and stdout files

  local task_name=$1
  local provenance=$2
  local host_job_id=$3
  local out_dir=$4  # put files to save in benchmarks-data repo here

  local tmp_dir=$BASE_DIR/tmp/$task_name

  local times_tsv=$out_dir/$task_name/$host_job_id.times.tsv
  rm -f $times_tsv

  mkdir -p $tmp_dir $out_dir/$task_name

  # header
  tsv-row \
    status elapsed_secs user_secs sys_secs max_rss_KiB \
    stdout_md5sum \
    host_name host_hash \
    runtime_name runtime_hash \
    task_name arg1 arg2 stdout_filename > $times_tsv

  local task_id=0

  # ${task_name}-tasks is one of the *-tasks generators defined above
  ${task_name}-tasks $provenance > $tmp_dir/tasks.txt

  cat $tmp_dir/tasks.txt |
  while read _ host host_hash runtime runtime_hash _ arg1 arg2 {
    local file
    case (runtime) { 
      (python2 {
        setvar file = ''py''
        }
      (*sh | *osh* {
        setvar file = $(basename $runtime)
        }
    }

    #log "runtime=$runtime args=$args"

    local stdout_filename="stdout-$file-$arg1-$(basename $arg2).txt"

    # Measurement BUG!  This makes dash have the memory usage of bash!
    # It's better to get argv into the shell.

    local -a cmd
    case (task_name) {
      (hello|fib {
        # Run it DIRECTLY, do not run $0.  Because we do NOT want to fork bash
        # then dash, because bash uses more memory.
        setvar cmd = ''($runtime benchmarks/compute/$task_name.$(ext $runtime) "$arg1" "$arg2")
        }
      (* {
        setvar cmd = ''($0 ${task_name}-one "$task_name" "$runtime" "$arg1" "$arg2")
        }
    }

    # join args into a single field
    time-tsv -o $times_tsv --append \
      --stdout $tmp_dir/$stdout_filename \
      --rusage \
      --field $host --field $host_hash \
      --field $runtime --field $runtime_hash \
      --field $task_name --field $arg1 --field $arg2 \
      --field $stdout_filename -- \
      ${cmd[@]}

    setvar task_id = $((task_id + 1))
  }

  #wc -l _tmp/compute/word_freq/*
  maybe-tree $tmp_dir
  cat $times_tsv
}

#
# Testdata
#

proc bubble_sort-testdata {
  ### Generate shuffled integer input files for bubble_sort

  local out=$BASE_DIR/tmp/bubble_sort
  mkdir -p $out

  # TODO: Make these deterministic for more stable benchmarks?
  for n in 100 200 300 400 {
    seq $n | shuf > $out/testdata-$n.txt
  }

  wc -l $out/testdata-*.txt
}

proc palindrome-testdata {
  ### Generate the palindrome input file (500 copies of a fixed block)

  local out=$BASE_DIR/tmp/palindrome
  mkdir -p $out

  # TODO: Use iters?

  for i in $(seq 500) {
    cat <<< """
foo
a
tat
cat

noon
amanaplanacanalpanama

μ
-μ-
"""

  } > $out/testdata.txt
  
  wc -l $out/testdata.txt
}

proc measure {
  ### Run all compute benchmarks for the shells recorded in provenance

  local provenance=$1
  local host_job_id=$2
  local out_dir=${3:-$BASE_DIR/raw}  # ../benchmark-data/compute

  mkdir -p $BASE_DIR/{tmp,raw,stage1} $out_dir

  # set -x
  hello-all $provenance $host_job_id $out_dir
  fib-all $provenance $host_job_id $out_dir

  # TODO: doesn't work because we would need duplicate logic in stage1
  #if test -n "${QUICKLY:-}"; then
  #  return
  #fi
  
  word_freq-all $provenance $host_job_id $out_dir
  parse_help-all $provenance $host_job_id $out_dir

  # Generate input files before the benchmarks that read them
  bubble_sort-testdata
  palindrome-testdata

  bubble_sort-all $provenance $host_job_id $out_dir

  # INCORRECT, but still run it
  palindrome-all $provenance $host_job_id $out_dir

  # array_ref takes too long to show quadratic behavior, and that's only
  # necessary on 1 machine.  I think I will make a separate blog post,
  # if anything.

  maybe-tree $out_dir
}

proc soil-run {
  ### Run it on just this machine, and make a report

  # Start from a clean output tree
  rm -r -f $BASE_DIR
  mkdir -p $BASE_DIR

  # Test the one that's IN TREE, NOT in ../benchmark-data
  local -a oil_bin=( $OSH_CPP_NINJA_BUILD _bin/cxx-opt+bumpleak/osh)
  ninja ${oil_bin[@]}

  # Placeholder host name used when running on a single machine (CI)
  local single_machine='no-host'

  local job_id
  setvar job_id = $(benchmarks/id.sh print-job-id)

  # Only measure what's in the Docker image
  # - The Soil 'benchmarks' job uses the 'cpp' Docker image, which doesn't have
  #   layer-cpython, ../oil_DEPS/cpython-full
  # - It also doesn't have mksh or zsh

  benchmarks/id.sh shell-provenance-2 \
    $single_machine $job_id _tmp \
    bash dash python2 ${oil_bin[@]}

  local provenance=_tmp/provenance.txt
  local host_job_id="$single_machine.$job_id"

  measure $provenance $host_job_id

  # Make it run on one machine
  stage1 '' $single_machine

  benchmarks/report.sh stage2 $BASE_DIR
  benchmarks/report.sh stage3 $BASE_DIR
}


proc test-report {
  ### Regenerate the report from existing single-machine raw data

  # Make it run on one machine
  stage1 '' no-host

  benchmarks/report.sh stage2 $BASE_DIR
  benchmarks/report.sh stage3 $BASE_DIR
}

proc stage1 {
  ### Concatenate the latest raw times for EVERY benchmark into one file

  local raw_dir=${1:-$BASE_DIR/raw}

  # This report works even if we only have one machine
  local single_machine=${2:-}

  local out_dir=$BASE_DIR/stage1
  mkdir -p $out_dir

  local times_tsv=$out_dir/times.tsv

  local -a raw=()

  # TODO: We should respect QUICKLY=1
  for metric in hello fib word_freq parse_help bubble_sort palindrome {
    local dir=$raw_dir/$metric

    if test -n $single_machine {
      local -a a=($dir/$single_machine.*.times.tsv)
      # BUG FIX: include ${raw[@]} so each metric's file is APPENDED; the old
      # code overwrote the array, so only the last metric was concatenated
      setvar raw = ''( ${raw[@]} ${a[-1]})
    } else {
      # Globs are in lexicographical order, which works for our dates.
      local -a a=($dir/$MACHINE1.*.times.tsv)
      local -a b=($dir/$MACHINE2.*.times.tsv)  # HACK for now

      # take the latest file from each machine, appending to the accumulator
      setvar raw = ''( ${raw[@]} ${a[-1]} ${b[-1]})
    }

  }
  csv-concat ${raw[@]} > $times_tsv
  wc -l $times_tsv
}

proc print-report {
  ### Print the HTML report body for the compute benchmarks to stdout

  local in_dir=$1

  benchmark-html-head 'OSH Compute Performance'

  cat <<< """
  <body class="width60">
    <p id="home-link">
      <a href="/">oilshell.org</a>
    </p>
"""
  cmark <<< """

## OSH Compute Performance

Running time and memory usage of programs that test data structures (as opposed
to I/O).

Memory usage is measured in MB (powers of 10), not MiB (powers of 2).

Source code: [oil/benchmarks/compute](https://github.com/oilshell/oil/tree/master/benchmarks/compute)

"""

  cmark <<< """
### hello (minimal startup)

"""

  tsv2html $in_dir/hello.tsv

  cmark <<< """
### fibonacci (integers)

- arg1: number of repetitions
- arg2: the N in fib(N)
"""

  tsv2html $in_dir/fib.tsv

  cmark <<< """
### word_freq (associative arrays / hash tables)

- arg1: number of repetitions
- arg2: the file (varies size of hash table)
"""

  tsv2html $in_dir/word_freq.tsv

  cmark <<< """
### parse_help (strings, real code)

- arg1: file to parse
"""

  tsv2html $in_dir/parse_help.tsv

  cmark <<< """
### bubble_sort (array of integers, arrays of strings)

- arg1: type of array
- arg2: length of array
"""

  tsv2html $in_dir/bubble_sort.tsv

  # Comment out until checksum is fixed

# The palindrome section is disabled at runtime by this 'if false'
if false {
  cmark <<< """
### palindrome (byte strings, unicode strings)

- arg1: type of string
- arg2: TODO: length of string
"""

  tsv2html $in_dir/palindrome.tsv

}

  cmark <<< """
### Interpreter and Host Details
"""

  tsv2html $in_dir/shells.tsv
  tsv2html $in_dir/hosts.tsv

  cmark <<< """
### Details
"""

  tsv2html $in_dir/details.tsv

  cmark <<< """
### Stdout Files
"""

  tsv2html $in_dir/stdout_files.tsv


  cat <<< """
  </body>
</html>
"""
}


proc control-flow {
  ### Compare break/continue overhead across dash, bash, and osh

  local osh=_bin/cxx-opt/osh
  #set -x

  ninja $osh

  # do_neither: dash 296 ms, bash 922, osh 993.  Not bad
  # 

  for func in do_neither do_continue do_break {
    echo "=== $func"
    echo
    for sh in dash bash $osh {
      echo "--- $sh"
      # TIMEFORMAT above
      time $sh benchmarks/compute/control_flow.sh $func 500
      echo
    }
  }
}

@ARGV
    (DONE benchmarks/compute.sh)
#!/usr/bin/env bash
#
# Run the 'perf' tool and associated reports on OSH.
#
# Usage:
#   benchmarks/perf.sh <function name>
#
# Deps:
#
#   Clone https://github.com/brendangregg/FlameGraph
#   Put it in ~/git/other/FlameGraph, or edit the paths below
#
# Examples:
#
#   $0 install  # install perf, including matching kernel symbols
#
#   $0 profile-osh-parse       # make flame graph
#
#   Then look at _tmp/perf/osh-parse.svg in the browser

#   $0 profile-osh-parse flat  # make flat text report
#
#   perf report -i _tmp/perf/osh-parse.perf  #  interactive
#
# Likewise for
#
#   $0 profile-example escape
#     => _tmp/perf/example-escape.svg
#   $0 profile-example escape flat
#     => _tmp/perf/example-escape.report.txt

set -o nounset
set -o pipefail
set -o errexit

readonly BASE_DIR=_tmp/perf

source test/common.sh  # $OSH

# TODO:
# - kernel symbols.  Is that why there are a lot of [unknown] in opt mode?
# - grep for call_function in collapsed.  I don't see it?
#   - it's inlined I guess?

# Question: PyEval_EvalFrameEx doesn't appear recursive in opt mode?  At least
# according to 'perf'.  Or maybe you don't have enough samples to catch it?

# NOTES:
# - dbg vs. opt matters a lot
# - function-level performance categorization is bad for bytecode interpreters,
#   which have a single function and a big switch statement.
# - a longer file like configure-coreutils hit garbage collection!  collect()
# - reference counting functions: visit_decref, visit_reachable

proc install-ubuntu-packages {
  # linux-tools-generic is the kernel module
  # Apparently you need a package specific to the kernel, not sure why.
  sudo apt-get install \
    linux-tools-common linux-tools-$(uname -r) linux-tools-generic
}

proc install-debian-packages {
  sudo apt-get install linux-perf
}

proc soil-install {
  sudo apt-get update  # seem to need this

  install-ubuntu-packages
}

proc debug-symbols {
  #dpkg --listfiles linux-tools-4.13.0-36-generic
  #sudo apt install python-dbg

  # I don't see symbols files here?  Just the interpreter?  They're built into the ELF file?
  #dpkg --listfiles python-dbg

  # has files in /usr/lib/debug
  # file /usr/lib/debug/.build-id/8d/9bd4ce26e45ef16075c67d5f5eeafd8b562832.debug
  # /usr/lib/debug/.build-id/8d/9bd4ce26e45ef16075c67d5f5eeafd8b562832.debug: ELF 64-bit LSB shared object, x86-64, version 1 (SYSV), dynamically linked, BuildID[sha1]=8d9bd4ce26e45ef16075c67d5f5eeafd8b562832, not stripped
  #
  # https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html

  # Does perf also support separate debug files?
  # How do I set the debug link in oil.ovm?  Or should I set build ID?

  # The GNU binary utilities (Binutils) package includes the ‘objcopy’ utility
  # that can produce the separated executable / debugging information file
  # pairs using the following commands:
  # objcopy --only-keep-debug foo foo.debug
  # strip -g foo

  sudo apt install zlib1g-dbg
  dpkg --listfiles zlib1g-dbg
  #libpython2.7-dbg 
}

# TODO: Link these two tools in ../oil_DEPS/bin or something
# Make them work on CI

# NOTE: I used this before with python-flamegraph too.
proc flamegraph {
  ### Wrapper: render folded stacks to SVG (FlameGraph checkout path is hard-coded).
  ~/git/other/FlameGraph/flamegraph.pl @ARGV
}

proc stackcollapse-perf {
  ### Wrapper: collapse 'perf script' output into one line per stack.
  ~/git/other/FlameGraph/stackcollapse-perf.pl @ARGV
}

# http://www.brendangregg.com/FlameGraphs/cpuflamegraphs.html
proc make-graph {
  ### Fold a perf recording under $BASE_DIR and render it as an SVG flame graph.
  local name=${1:-osh-parse}

  local dir=$BASE_DIR
  local folded=$dir/$name.perf-folded
  local svg=$dir/$name.svg

  # Collapse raw samples into one line per unique stack, then render
  perf script -i $dir/$name.perf | stackcollapse-perf > $folded
  flamegraph $folded > $svg

  echo "Wrote $svg"
}

proc _make-readable {
  ### Make a root-owned perf data file readable by the invoking user.
  local perf_raw=$1

  # world-readable, then hand ownership back to the original user
  chmod 644 $perf_raw
  chown $USER $perf_raw
}

proc make-readable {
  ### Re-invoke _make-readable as root on $BASE_DIR/$name.perf.
  # This gets run as root
  local name=$1
  local perf_raw=$BASE_DIR/$name.perf

  sudo $0 _make-readable $perf_raw

  # Show the result so permission problems are immediately visible
  file $perf_raw
  ls -l $perf_raw
}

proc _record-cpp {
  ### Run 'perf record' on a command; invoked as root via profile-cpp.
  local name=$1  # e.g. oils-for-unix, escape
  local mode=$2  # 'graph' (record call graphs) or 'flat'
  shift 2

  # Can repeat 13 times without blowing heap
  #export REPEAT=13 

  local freq=10000  # sampling frequency in Hz

  local extra_flags=''
  case (mode) { 
    (graph { setvar extra_flags = ''-g'' }  # needed to make flame graph
    (flat {  setvar extra_flags = '''' }
    (* {     die "Mode should be graph or flat, got $mode" }
  }

  # Remaining args (@ARGV) are the command line to profile
  time perf record $extra_flags -F $freq -o $BASE_DIR/$name.perf -- @ARGV

  # perf wrote the data file as root; make it readable by $USER
  make-readable $name
}

proc profile-cpp { 
  ### Record a profile of a command as root, then render a graph or flat report.
  local name=$1  # report name under $BASE_DIR
  local mode=$2  # 'graph' or 'flat'
  shift
  # NOTE(review): only ONE arg is shifted, so @ARGV below still begins with
  # $mode; _record-cpp therefore receives ($name, $mode, command...), which
  # matches its own 'shift 2'.

  mkdir -p $BASE_DIR

  # -E preserve environment like BENCHMARK=1
  sudo -E $0 _record-cpp $name @ARGV;

  case (mode) { 
    (graph {
      make-graph $name
      }
    (flat {
      local out=$BASE_DIR/$name.report.txt
      text-report $name | tee $out
      echo "Wrote $out"
      }
    (* {
      die "Mode should be graph or flat, got $mode"
      }
  }
}

proc profile-osh-parse {
  ### Profile OSH parsing a big file, then re-run it under 'perf stat'
  ### and with GC stats enabled.
  # More than half the time is in malloc (_int_malloc in GCC), which is
  # not surprising!

  local osh=${1:-_bin/cxx-opt/osh}  # binary to profile
  local mode=${2:-graph}            # 'graph' or 'flat', passed to profile-cpp

  #local file=benchmarks/testdata/configure
  local file=benchmarks/testdata/configure-coreutils

  local -a cmd=( $osh --ast-format none -n $file )

  # Fixed: stray single quotes left by translation broke the next 3 commands
  profile-cpp 'osh-parse' $mode ${cmd[@]}

  # 'perf list' shows the events
  #OILS_GC_STATS=1 sudo perf stat -e cache-misses -e cache-references "${cmd[@]}"
  OILS_GC_STATS=1 sudo perf stat ${cmd[@]}

  # Run again with GC stats
  time OILS_GC_STATS=1 ${cmd[@]}
}

proc profile-fib {
  ### Profile the fib.sh compute benchmark (same iterations as benchmarks/gc).
  local osh=${1:-_bin/cxx-opt/osh}
  local mode=${2:-graph}

  profile-cpp 'fib' $mode $osh benchmarks/compute/fib.sh 500 44
}

proc profile-execute {
  ### Profile OSH executing the parse-help workload.
  local osh=${1:-_bin/cxx-opt/osh}
  local mode=${2:-graph}

  profile-cpp 'parse-help' $mode \
    $osh benchmarks/parse-help/pure-excerpt.sh parse_help_file benchmarks/parse-help/mypy.txt
}

proc profile-example {
  ### Build and profile one mycpp example binary (e.g. 'escape').
  local example=${1:-escape}
  local mode=${2:-graph}  # 'graph' or 'flat'

  local bin="_bin/cxx-opt/mycpp/examples/$example.mycpp"

  ninja $bin
  # Fixed: stray quotes from translation broke the next two commands
  echo

  # BENCHMARK=1 is preserved through sudo -E in profile-cpp
  BENCHMARK=1 profile-cpp "example-$example" $mode $bin
}

proc profile-hash-table {
  ### Build and profile the hash table microbenchmark.
  local mode=${1:-graph}
  local bin='_bin/cxx-opt/mycpp/hash_table'

  ninja $bin
  profile-cpp 'hash_table' $mode $bin -t hash_speed_test
}

# Perf note: Without -o, for some reason osh output is shown on the console.
# It doesn't go to wc?
#perf record -o perf.data -- _bin/osh -n benchmarks/testdata/abuild | wc -l

proc text-report {
  ### Show a batch report; 'perf report' is interactive
  local name=$1
  shift

  # Remaining args (@ARGV) are passed straight through to 'perf report'
  local data_file=$BASE_DIR/$name.perf
  perf report -i $data_file -n --stdio @ARGV
}

# Shows instruction counts, branch misses, and so forth
#
# Wow 11 billion instructions!  9 billion cycles.  2.3 billion branches.  Crazy.
# Only 21M branch misses, or 0.9%.  Interesting.
proc _stat {
  ### Run 'perf stat' on a command (invoked as root via 'stat' below).
  # NOTE(review): perf stat writes its counters to stderr, so 'wc -l'
  # presumably counts the profiled command's stdout lines — confirm intent.
  perf stat -- @ARGV | wc -l
  # -e cache-misses only shows that stat
}
# Re-run _stat as root; 'perf stat' needs elevated privileges
proc stat { sudo $0 _stat @ARGV; }

proc stat-osh-parse {
  stat _bin/cxx-opt/oils-for-unix --ast-format none -n benchmarks/testdata/configure
}


#
# OLD OVM stuff
#

# Parsing abuild in Debug mode:
# 14%  PyEval_EvalFrameEx  -- hm.  Interpreter dispatch is a lot?  More than I
#      thought.  Maybe need my own loop.
# 8%   type_call -- hm introspection?
# 7%   PyObject_GetAttr.  My intitution.  Should be done at compile time!
# 6%   do_richcmp  -- hm interesting
# 5%   PyObject_Malloc.

# More benchmarks:
# OPy running under OVM, compiling itself, compiling Oil, compiling OPy ports,
# etc.

# Parsing abuild, the optimized version.
#
# 80% PyEval_EvalFramEx.  Woah everything is inlined?
# 12.5%  PyObject_GenericGetAtr.  PyObject_GetAttr is much lower down.
# Some kernel.
# 0.76%  lookdict_string is not a bottleneck.  Hm.
#
# Wow.
# Maybe I need counters in optimized mode?
# Yeah what I really want is per opcode total!

proc _record {
  ### Old OVM profiling: 'perf record' of oil.ovm parsing a big file.

  # TODO: The optimized build should have symbols!  Don't build with -s.  And
  # then put symbols next to the binary somehow?  How do the symbols packages
  # work?
  #perf record -o perf.data -- _bin/oil.ovm-dbg osh -n benchmarks/testdata/abuild | wc -l

  # call graph recording.  This helps it be less "flat" in opt mode.  Otherwise
  # everything is PyEval_EvalFrameEx.
  local flag='-g'
  local bin=_bin/oil.ovm-opt 
  #local bin=_bin/oil.ovm-dbg  # This shows more details

  local freq=1000  # 1000 Hz

  #local file=benchmarks/testdata/abuild  # small file

  local file=benchmarks/testdata/configure-coreutils  # big file

  time perf record $flag -F $freq -o perf.data -- $bin osh --ast-format none -n $file
  #perf record -o perf.data -- _bin/osh --ast-format none -n benchmarks/testdata/abuild
}
# Re-run _record as root; 'perf record' needs elevated privileges
proc record { sudo $0 _record; }

#
# Soil CI
#

# Fixed: stray quotes from translation ("proc ... '{" and "_OIL_DEV=1'")
# made this proc unparseable.
proc build-stress-test {
  ### Configure the tree and compile the GC stress test into _tmp/.

  # Special _OIL_DEV for -D GC_TIMING
  _OIL_DEV=1 ./configure --without-readline

  mkdir -p _tmp
  c++ -D MARK_SWEEP -I . \
    -O2 -g \
    -o _tmp/gc_stress_test \
    mycpp/gc_stress_test.cc \
    mycpp/mark_sweep_heap.cc \
    mycpp/gc_builtins.cc \
    mycpp/gc_mylib.cc \
    mycpp/gc_str.cc \
    mycpp/hash.cc \
    -lstdc++
}

proc profile-stress-test {
  profile-cpp 'gc_stress_test' flat \
    _tmp/gc_stress_test
}

proc print-index {
  ### Print an HTML index linking each perf report text file in $BASE_DIR.
  echo '<body style="margin: 0 auto; width: 40em; font-size: large">'
  echo '<h1>Perf Profiles</h1>'

  for path in $BASE_DIR/*.txt {
    local filename=$(basename $path)
    # Fixed: inner quotes must be escaped, otherwise the href attribute
    # value is left unquoted in the generated HTML
    echo "<a href=\"$filename\">$filename</a> <br/>"
  }

  echo '</body>'
}

# TODO: fetch the tarball from the cpp-small CI task

proc build-tar {
  ### Extract the release tarball into $BASE_DIR/tar and build two variants.
  local tar=${1:-_release/oils-for-unix.tar}

  # absolutize before we pushd away from the repo root
  setvar tar = "$PWD/$tar"

  local tmp=$BASE_DIR/tar
  mkdir -p $tmp

  pushd $tmp

  tar --extract < $tar
  cd oils-for-unix-*  # glob of 1

  ./configure

  # TODO: add bumproot
  for variant in opt+bumpleak opt {
    echo

    time _build/oils.sh '' $variant
    echo

    # smoke test: run the freshly built shell
    _bin/cxx-$variant-sh/osh -c 'echo "hi from $0"'
  }

  # TODO:
  # - profile each executable
  # - add OILS_GC_THRESHOLD=$big to avoid GC

  popd
}

proc soil-run {
  ### CI entry point: build, profile, and write an HTML index of reports.
  echo 'TODO run benchmarks/gc tasks'
  # But we don't have Ninja
  # Fetch the tarball?

  # Can you WAIT for the tarball?
  # You can wait for the cpp-small task that builds it?  Ah hacky hacky

  build-stress-test

  profile-stress-test

  # NOTE(review): export-osh-cpp is not defined in this file — presumably
  # provided by a sourced script; it should set $OSH used below. Confirm.
  export-osh-cpp _tmp/native-tar-test opt
  #export-osh-cpp '' opt

  profile-fib $OSH flat
  profile-osh-parse $OSH flat

  print-index > $BASE_DIR/index.html

  echo "Wrote $BASE_DIR/index.html"
}

@ARGV
    (DONE benchmarks/perf.sh)
#!/usr/bin/env bash
#
# Usage:
#   ./run.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

setvar DIR = 'benchmarks/javascript'

# TODO:
# - Use benchmarks/time.py for this and make a table
# - Upgrade quickjs

proc run-all {
  ### Time one benchmark ($1) across Python, JS engines, and shells.
  ### Extra args are forwarded to every implementation.
  local name=$1
  shift
  set -x  # echo each timed command line

  time $DIR/$name.py @ARGV

  time $DIR/$name.js @ARGV

  # embedded JS engines (hard-coded local paths)
  time ~/src/duktape-2.5.0/duk $DIR/$name.js @ARGV
  time ~/src/languages/quickjs-2019-07-09/qjs $DIR/$name.js @ARGV

  time bash $DIR/$name.sh @ARGV
  time zsh $DIR/$name.sh @ARGV

  # OSH under CPython: 21.5 seconds.  10x slower.
  time bin/osh $DIR/$name.sh @ARGV
}

# integers is a lot harder for shell than hexstring
# searching through 1000 * 1000 = 1M.

# duktape = 89 ms 
# quickjs = 18 ms  # beats node probably because of startup time
# node = 32 ms
#
# zsh: 1.2 seconds.  bash 2.5 seconds.  So JS has a big advantage here.

# Benchmark: find i*i + j*j == x (see squares.{py,js,sh})
proc squares { run-all squares; }

# duktape = 123ms
# quickjs = 71ms
# node.js = 38ms.  Not bad although that may be startup time.
# this is searching through a loop of 16 * 16 * 16 = 4096.
#
# zsh: 150 ms, bash: 165ms.  Not as big an advantage.  But still JS is better
# for code readability.
# Benchmark: hex strings with a given popcount (see hexstring.{py,js,sh})
proc hexstring { run-all hexstring; }


@ARGV
    (DONE benchmarks/javascript/run.sh)
#!/usr/bin/env bash

setvar n = '1000'
setvar x = '10000'

proc main {
  ### Brute-force search for pairs (i, j) with i*i + j*j == x.
  for ((i = 0; i < n; ++i)) {
    for ((j = i; j < n; ++j)) {
      # Fixed: leftover bash '; then ... fi' inside a YSH brace body;
      # use the brace form consistently
      if (( i*i + j*j == x )) {
        echo $i $j
      }
    }
  }
}

main
    (DONE benchmarks/javascript/squares.sh)
#!/usr/bin/env bash

# Enumerate all 3-digit hex strings (16^3 = 4096); print those whose
# 12-bit binary expansion has exactly 11 one-bits.
# NOTE(review): 'hexdigits' appears unused below — confirm.
setvar hexdigits = ''0123456789abcdef''
for c in {0..9} {a..f} {
  for d in {0..9} {a..f} {
    for e in {0..9} {a..f} {
      setvar hexbyte = "$c$d$e"

      # Expand each hex digit to its 4-bit binary form, one digit value
      # at a time via global string substitution
      setvar byte = "$hexbyte"
      setvar byte = ${byte//0/0000}
      setvar byte = ${byte//1/0001}
      setvar byte = ${byte//2/0010}
      setvar byte = ${byte//3/0011}

      setvar byte = ${byte//4/0100}
      setvar byte = ${byte//5/0101}
      setvar byte = ${byte//6/0110}
      setvar byte = ${byte//7/0111}

      setvar byte = ${byte//8/1000}
      setvar byte = ${byte//9/1001}
      setvar byte = ${byte//a/1010}
      setvar byte = ${byte//b/1011}

      setvar byte = ${byte//c/1100}
      setvar byte = ${byte//d/1101}
      setvar byte = ${byte//e/1110}
      setvar byte = ${byte//f/1111}

      #echo $byte)

      # Delete the zeros; the remaining string length is the popcount
      setvar ones = ${byte//0/}
      if test ${#ones} -eq 11 {
        echo $hexbyte $byte
      }
    }
  }
}
    (DONE benchmarks/javascript/hexstring.sh)
#!/usr/bin/env bash
#
# Usage:
#   benchmarks/report.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

source benchmarks/common.sh  # maybe-tree
source build/dev-shell.sh  # R_LIBS_USER
source test/common.sh  # log

# TODO: Move stuff from osh-parser.sh, osh-runtime.sh, etc.
#
# stage1 : concatenate files from different machines
# stage2 : make CSV files with report.R
# stage3 : make HTML files.  Call 'print-report' function.


proc stage2 {
  ### Turn stage1 concatenated data into CSV files via report.R.
  local base_dir=$1  # _tmp/{osh-parser,osh-runtime,...}

  # The benchmark's directory name doubles as the report.R action
  local action=$(basename $base_dir)
  local out=$base_dir/stage2

  mkdir -p $out
  benchmarks/report.R $action $base_dir/stage1 $out
  maybe-tree $out
}

proc stage3 {
  ### Render stage2 CSV data into an HTML report.
  local base_dir=$1  # _tmp/{osh-parser,osh-runtime,...}
  local name=${2:-$(basename $base_dir)}

  # Each benchmark's own script knows how to print its report
  local script=benchmarks/$name.sh
  local out=$base_dir/index.html

  mkdir -p $(dirname $out)
  $script print-report $base_dir/stage2 > $out

  echo "Wrote $out"
}

proc osh-parser {
  local base_dir=_tmp/osh-parser

  benchmarks/osh-parser.sh stage1 ../benchmark-data/osh-parser
  stage2 $base_dir
  stage3 $base_dir
}

proc osh-runtime {
  local base_dir=_tmp/osh-runtime

  benchmarks/osh-runtime.sh stage1 ../benchmark-data/osh-runtime
  stage2 $base_dir
  stage3 $base_dir
}

# NOTE: This is just processing
proc vm-baseline {
  local base_dir=_tmp/vm-baseline

  benchmarks/vm-baseline.sh stage1 ../benchmark-data/vm-baseline
  stage2 $base_dir
  stage3 $base_dir
}

proc ovm-build {
  local base_dir=_tmp/ovm-build

  benchmarks/ovm-build.sh stage1 ../benchmark-data/ovm-build
  stage2 $base_dir
  stage3 $base_dir
}

proc compute {
  local base_dir=_tmp/compute

  benchmarks/compute.sh stage1 ../benchmark-data/compute
  stage2 $base_dir
  stage3 $base_dir
}

proc all {
  osh-parser
  osh-runtime
  vm-baseline
  ovm-build
  compute

  # Note:
  # benchmarks/mycpp and benchmarks/gc run on one machine, and are done in
  # benchmarks/auto.sh
}

# For view
proc dev-index {
  local out=_tmp/benchmarks.html
  for name in osh-parser osh-runtime vm-baseline ovm-build {
    echo "<a href=\"$name/index.html\">$name</a> <br/>"
  } > $out
  log "Wrote $out"
}

proc report-test {
  benchmarks/report_test.R
}

@ARGV
    (DONE benchmarks/report.sh)
#!/usr/bin/env bash
#
# Use sys.setprofile() and maybe sys.settrace() to trace Oil execution.
#
# Problem: Python callbacks for sys.setprofile() are too slow I think.
#
# Usage:
#   ./pytrace.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

export PYTHONPATH='.:vendor'

readonly BIGGEST=benchmarks/testdata/configure-coreutils
readonly GIT_COMPLETION=testdata/completion/git
readonly OSH_COMPLETION=../bash-completion/osh_completion
readonly ABUILD=benchmarks/testdata/abuild

readonly -a RUN_ABUILD=(bin/oil.py osh $ABUILD -h)
# Slightly faster but not significantly.
#readonly -a RUN_ABUILD=(_bin/osh $ABUILD -h)
readonly -a OSH_PARSE=(bin/oil.py osh --ast-format none -n)

#
# Use Python's cProfile, which uses _lsprof.  This is pretty fast.
#

proc time-bash-run-abuild { time bash $ABUILD -h; }

# Old: ~2.7 seconds (no tracing)
# 2017/11/27, After ASDL optimization: 0.72 seconds.
proc time-run-abuild { time ${RUN_ABUILD[@]}; }

# ~250 ms
proc time-parse-abuild { time ${OSH_PARSE[@]} $ABUILD; }

# ~160 ms
proc time-parse-git-completion { time ${OSH_PARSE[@]} $GIT_COMPLETION; }
# ~150 ms
proc time-parse-osh-completion { time ${OSH_PARSE[@]} $OSH_COMPLETION; }

# 4.3 seconds on lisa
proc time-parse-biggest { time ${OSH_PARSE[@]} $BIGGEST; }

proc _cprofile {
  local out=$1
  shift
  time python -m cProfile -o $out @ARGV
}

# Takes about 380 ms.
proc cprofile-osh-parse {
  local in=${1:-$ABUILD}
  local out=${2:-abuild.cprofile}
  _cprofile $out ${OSH_PARSE[@]} $in
  ls -l $out
}

proc cprofile-parse-abuild {
  cprofile-osh-parse $ABUILD _tmp/abuild.cprofile
}
proc cprofile-parse-biggest {
  cprofile-osh-parse $BIGGEST _tmp/biggest.cprofile
}
proc cprofile-run-abuild {
  _cprofile _tmp/abuild-run.cprofile ${RUN_ABUILD[@]}
}

# TODO: Why doesn't this run correctly?  The results are different.  Maybe run
# spec tests with bin/osh-cprofile and see where it goes wrong?
readonly pydir=~/src/languages/Python-2.7.15
proc cprofile-pyconfigure {
  ### Profile 'osh myconfigure' under cProfile inside the CPython source dir.
  readonly REPO_ROOT=$PWD

  # Fixed: stray double quotes from translation broke 'cd' and the
  # PYTHONPATH environment prefix
  cd $pydir

  PYTHONPATH=$REPO_ROOT:$REPO_ROOT/vendor \
    time python -m cProfile -o pyconfigure.cprofile \
    $REPO_ROOT/bin/oil.py osh myconfigure
    #_cprofile pyconfigure.cprofile \
}
proc print-pyconfigure { print-cprofile $pydir/pyconfigure.cprofile; }

# TODO: Try uftrace?  I guess you can compare wait4() call duration with bash
# vs. osh?
proc strace-run-abuild {
  #local filter='read,wait4' 
  local filter='execve,wait4' 
  time strace -ff -e $filter ${RUN_ABUILD[@]}
  #time strace -c "${RUN_ABUILD[@]}"
}

# Yeah I understand from this why Chrome Tracing / Flame Graphs are better.
# This format doesn't respect the stack!
# cumtime: bin/oil.py is the top, obviously
proc print-cprofile {
  local profile=${1:-_tmp/abuild.cprofile}
  python -c '
import pstats
import sys
p = pstats.Stats(sys.argv[1])
p.sort_stats("tottime").print_stats()
' $profile
}

#
# My Own Tracing with pytrace.py.  Too slow!
#


# Abuild call/return events:
# Parsing: 4,345,706 events
# Execution: 530,924 events

# Total events:
# 14,918,308
# Actually that is still doable as binary.  Not sure it's viewable in Chrome
# though.
# 14 bytes * 14.9M is 209 MB.

# Fixed: stray quotes from translation ("proc ... '{" and "pytrace'")
# made this proc unparseable.
proc abuild-trace {
  ### Trace abuild parsing with pytrace (too slow in practice; see notes above).
  # NOTE(review): PARSE_ABUILD is not defined in this file — the arrays
  # above are RUN_ABUILD and OSH_PARSE; confirm which was intended.
  _PY_TRACE=abuild.pytrace time ${PARSE_ABUILD[@]}
}

#
# Depends on pytracing, which is also too slow.
#

# Trace a parsing function
proc parse {
  #local script=$ABUILD 
  local script=$0
  time bin/oil.py osh --ast-format none -n $script >/dev/null
}

# Trace the execution
proc execute {
  #local script=$ABUILD 
  local script=$0
  #time bin/oil.py osh -c 'echo hi'
  time bin/oil.py osh $0

  ls -l -h *.json
}

# Idea: I Want a flame graph based on determistic data!  That way you get the
# full stack trace.

# It wasn't happening in the python-flamegraph stuff for some reason.  Not sure
# why.  I think it is because I/O was exaggerated.
# 
# Interpreter hook:
#
# for thread_id, frame in sys._current_frames().items():
#   if thread_id == my_thread:
#     continue

# Note that opening file descriptors can cause bugs!  I have to open it above
# descriptor 10!

# python-flamegraph
# - suffers from measurement error due to threads.  
# - is RunCommandSub is being inflated?
#    - well actually i'm not sure.  I have to do it myself on a single thread
#    and see.
# pytracing:
# - the format is too bloated.  It can't handle abuild -h.  So I have to
# optimize it.
#
# I want to unify these two approaches: both flame graphs and function traces.
#
# Advantage: sys.setprofile() gets C function call events!
#
# Reservoir Sampling!  Get a list of all unique stacks.
#
# You can figure out the stack from the current/call/return sequence.  So you
# can use the reservoir sampling algorithm to get say 100,000 random stacks out
# of 14 M events.
#
# sys.getframes()

@ARGV
    (DONE benchmarks/pytrace.sh)
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is for configuring kubernetes master and node instances. It is
# uploaded in the manifests tar ball.

# TODO: this script duplicates templating logic from cluster/saltbase/salt
# using sed. It should use an actual template parser on the manifest
# files.

set -o errexit
set -o nounset
set -o pipefail

proc setup-os-params {
  ### Set kernel parameters needed by the node.
  # Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to
  # /sbin/crash_reporter which is more restrictive in saving crash dumps. So for
  # now, set a generic core_pattern that users can work with.
  echo "core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}

proc config-ip-firewall {
  ### Open the host firewall and set up metadata-server filtering.
  echo "Configuring IP firewall rules"
  # The GCI image has host firewall which drop most inbound/forwarded packets.
  # We need to add rules to accept all TCP/UDP/ICMP packets.
  if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null {
    echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
    iptables -A INPUT -w -p TCP -j ACCEPT
    iptables -A INPUT -w -p UDP -j ACCEPT
    iptables -A INPUT -w -p ICMP -j ACCEPT
  }
  if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null {
    echo "Add rules to accept all forwarded TCP/UDP/ICMP packets"
    iptables -A FORWARD -w -p TCP -j ACCEPT
    iptables -A FORWARD -w -p UDP -j ACCEPT
    iptables -A FORWARD -w -p ICMP -j ACCEPT
  }

  # Route forwarded traffic to the GCE metadata server through its own chain
  iptables -N KUBE-METADATA-SERVER
  iptables -I FORWARD -p tcp -d 169.254.169.254 --dport 80 -j KUBE-METADATA-SERVER

  # Optionally block that traffic entirely
  if [[ -n "${KUBE_FIREWALL_METADATA_SERVER:-}" ]] {
    iptables -A KUBE-METADATA-SERVER -j DROP
  }
}

proc create-dirs {
  ### Create kubelet/manifest directories; kube-proxy dir on non-masters only.
  echo "Creating required directories"
  mkdir -p /var/lib/kubelet
  mkdir -p /etc/kubernetes/manifests
  if [[ "${KUBERNETES_MASTER:-}" == "false" ]] {
    mkdir -p /var/lib/kube-proxy
  }
}

# Formats the given device ($1) if needed and mounts it at given mount point
# ($2).
proc safe-format-and-mount {
  ### Format device $1 (only if unformatted) and mount it at $2.
  setvar device = "$1"
  setvar mountpoint = "$2"

  # Format only if the disk is not already formatted.
  # (tune2fs -l presumably fails on a device with no ext filesystem)
  if ! tune2fs -l ${device}  {
    echo "Formatting '${device}'"
    mkfs.ext4 -F ${device}
  }

  mkdir -p ${mountpoint}
  echo "Mounting '${device}' at '${mountpoint}'"
  mount -o discard,defaults ${device} ${mountpoint}
}

# Local ssds, if present, are mounted at /mnt/disks/ssdN.
proc ensure-local-ssds {
  ### Format (if needed) and mount each local SSD at /mnt/disks/ssdN.
  for ssd in /dev/disk/by-id/google-local-ssd-* {
    if test -e ${ssd} {
      # Extract the trailing SSD number from the device path.
      # Fixed: the sed expression was missing its closing single quote.
      setvar ssdnum = $(echo ${ssd} | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/')
      setvar ssdmount = ""/mnt/disks/ssd${ssdnum}/""
      mkdir -p ${ssdmount}
      safe-format-and-mount ${ssd} ${ssdmount}
      echo "Mounted local SSD $ssd at ${ssdmount}"
      chmod a+w ${ssdmount}
    } else {
      # The glob didn't match, so the literal pattern reached the loop body
      echo "No local SSD disks found."
    }
  }
}

# Installs logrotate configuration files
proc setup-logrotate {
  ### Install /etc/logrotate.d/allvarlogs to rotate every /var/log/*.log.
  mkdir -p /etc/logrotate.d/
  # Configure log rotation for all logs in /var/log, which is where k8s services
  # are configured to write their log files. Whenever logrotate is ran, this
  # config will:
  # * rotate the log file if its size is > 100Mb OR if one day has elapsed
  # * save rotated logs into a gzipped timestamped backup
  # * log file timestamp (controlled by 'dateformat') includes seconds too. This
  #   ensures that logrotate can generate unique logfiles during each rotation
  #   (otherwise it skips rotation if 'maxsize' is reached multiple times in a
  #   day).
  # * keep only 5 old (rotated) logs, and will discard older logs.
  cat > /etc/logrotate.d/allvarlogs <<< """
/var/log/*.log {
    rotate ${LOGROTATE_FILES_MAX_COUNT:-5}
    copytruncate
    missingok
    notifempty
    compress
    maxsize ${LOGROTATE_MAX_SIZE:-100M}
    daily
    dateext
    dateformat -%Y%m%d-%s
    create 0644 root root
}
"""

}

# Finds the master PD device; returns it in MASTER_PD_DEVICE
proc find-master-pd {
  ### Set MASTER_PD_DEVICE to the master PD device path, or '' if absent.
  setvar MASTER_PD_DEVICE = """"
  if [[ ! -e /dev/disk/by-id/google-master-pd ]] {
    return
  }
  # 'ls -l' on the symlink; its last space-separated field is the link target
  setvar device_info = $(ls -l /dev/disk/by-id/google-master-pd)
  setvar relative_path = ${device_info##* }
  setvar MASTER_PD_DEVICE = ""/dev/disk/by-id/${relative_path}""
}

# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
# safe-format-and-mount only formats an unformatted disk, and mkdir -p will
# leave a directory be if it already exists.
proc mount-master-pd {
  ### Mount the master persistent disk and link its data dirs into place.
  find-master-pd
  if [[ -z "${MASTER_PD_DEVICE:-}" ]] {
    return
  }

  echo "Mounting master-pd"
  local -r pd_path="/dev/disk/by-id/google-master-pd"
  local -r mount_point="/mnt/disks/master-pd"
  # Format and mount the disk, create directories on it for all of the master's
  # persistent data, and link them to where they're used.
  mkdir -p ${mount_point}
  safe-format-and-mount ${pd_path} ${mount_point}
  echo "Mounted master-pd '${pd_path}' at '${mount_point}'"

  # NOTE: These locations on the PD store persistent data, so to maintain
  # upgradeability, these locations should not change.  If they do, take care
  # to maintain a migration path from these locations to whatever new
  # locations.

  # Contains all the data stored in etcd.
  mkdir -m 700 -p "${mount_point}/var/etcd"
  ln -s -f "${mount_point}/var/etcd" /var/etcd
  mkdir -p /etc/srv
  # Contains the dynamically generated apiserver auth certs and keys.
  mkdir -p "${mount_point}/srv/kubernetes"
  ln -s -f "${mount_point}/srv/kubernetes" /etc/srv/kubernetes
  # Directory for kube-apiserver to store SSH key (if necessary).
  mkdir -p "${mount_point}/srv/sshproxy"
  ln -s -f "${mount_point}/srv/sshproxy" /etc/srv/sshproxy

  # Create the etcd system user if it doesn't exist yet
  if ! id etcd &>/dev/null {
    useradd -s /sbin/nologin -d /var/etcd etcd
  }
  chown -R etcd "${mount_point}/var/etcd"
  chgrp -R etcd "${mount_point}/var/etcd"
}

# append_or_replace_prefixed_line ensures:
# 1. the specified file exists
# 2. existing lines with the specified ${prefix} are removed
# 3. a new line with the specified ${prefix}${suffix} is appended
proc append_or_replace_prefixed_line {
  ### Ensure file $1 exists, drop lines starting with $2, append "$2$3".
  local -r file="${1:-}"
  local -r prefix="${2:-}"
  local -r suffix="${3:-}"
  local -r dirname="$(dirname ${file})"
  # temp file in the same directory, presumably so the final 'mv' is a
  # same-filesystem rename — confirm
  local -r tmpfile="$(mktemp -t filtered.XXXX --tmpdir=${dirname})"

  touch ${file}
  # keep every line that does NOT start with the prefix
  awk "substr(\$0,0,length(\"${prefix}\")) != \"${prefix}\" { print }" ${file} > "${tmpfile}"
  echo "${prefix}${suffix}" >> "${tmpfile}"
  mv ${tmpfile} ${file}
}

proc write-pki-data {
  ### Base64-decode $1 into file $2, created under umask 077 (owner-only).
  local data="${1}"
  local path="${2}"
  # subshell so the umask change doesn't leak to the caller
  shell {umask 077; echo ${data} | base64 --decode > "${path}"}
}

proc create-node-pki {
  ### Write node PKI material (CA bundle, optional kubelet cert/key) to disk.
  echo "Creating node pki files"

  local -r pki_dir="/etc/srv/kubernetes/pki"
  mkdir -p ${pki_dir}

  # fall back to the plain CA cert when no bundle is provided
  if [[ -z "${CA_CERT_BUNDLE:-}" ]] {
    setvar CA_CERT_BUNDLE = "${CA_CERT}"
  }

  setvar CA_CERT_BUNDLE_PATH = ""${pki_dir}/ca-certificates.crt""
  write-pki-data ${CA_CERT_BUNDLE} ${CA_CERT_BUNDLE_PATH}

  # kubelet cert/key are optional; write them only when both are present
  if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]] {
    setvar KUBELET_CERT_PATH = ""${pki_dir}/kubelet.crt""
    write-pki-data ${KUBELET_CERT} ${KUBELET_CERT_PATH}

    setvar KUBELET_KEY_PATH = ""${pki_dir}/kubelet.key""
    write-pki-data ${KUBELET_KEY} ${KUBELET_KEY_PATH}
  }

  # TODO(mikedanese): remove this when we don't support downgrading to versions
  # < 1.6.
  ln -sf ${CA_CERT_BUNDLE_PATH} /etc/srv/kubernetes/ca.crt
}

proc create-master-pki {
  ### Write master PKI material (CA, apiserver, service-account, aggregator
  ### certs/keys) to /etc/srv/kubernetes/pki, with legacy symlinks.
  echo "Creating master pki files"

  local -r pki_dir="/etc/srv/kubernetes/pki"
  mkdir -p ${pki_dir}

  setvar CA_CERT_PATH = ""${pki_dir}/ca.crt""
  write-pki-data ${CA_CERT} ${CA_CERT_PATH}

  # this is not true on GKE
  if [[ ! -z "${CA_KEY:-}" ]] {
    setvar CA_KEY_PATH = ""${pki_dir}/ca.key""
    write-pki-data ${CA_KEY} ${CA_KEY_PATH}
  }

  # fall back to the generic master cert/key when specific ones are unset
  if [[ -z "${APISERVER_SERVER_CERT:-}" || -z "${APISERVER_SERVER_KEY:-}" ]] {
    setvar APISERVER_SERVER_CERT = "${MASTER_CERT}"
    setvar APISERVER_SERVER_KEY = "${MASTER_KEY}"
  }

  setvar APISERVER_SERVER_CERT_PATH = ""${pki_dir}/apiserver.crt""
  write-pki-data ${APISERVER_SERVER_CERT} ${APISERVER_SERVER_CERT_PATH}

  setvar APISERVER_SERVER_KEY_PATH = ""${pki_dir}/apiserver.key""
  write-pki-data ${APISERVER_SERVER_KEY} ${APISERVER_SERVER_KEY_PATH}

  if [[ -z "${APISERVER_CLIENT_CERT:-}" || -z "${APISERVER_CLIENT_KEY:-}" ]] {
    setvar APISERVER_CLIENT_CERT = "${KUBEAPISERVER_CERT}"
    setvar APISERVER_CLIENT_KEY = "${KUBEAPISERVER_KEY}"
  }

  setvar APISERVER_CLIENT_CERT_PATH = ""${pki_dir}/apiserver-client.crt""
  write-pki-data ${APISERVER_CLIENT_CERT} ${APISERVER_CLIENT_CERT_PATH}

  setvar APISERVER_CLIENT_KEY_PATH = ""${pki_dir}/apiserver-client.key""
  write-pki-data ${APISERVER_CLIENT_KEY} ${APISERVER_CLIENT_KEY_PATH}

  if [[ -z "${SERVICEACCOUNT_CERT:-}" || -z "${SERVICEACCOUNT_KEY:-}" ]] {
    setvar SERVICEACCOUNT_CERT = "${MASTER_CERT}"
    setvar SERVICEACCOUNT_KEY = "${MASTER_KEY}"
  }

  setvar SERVICEACCOUNT_CERT_PATH = ""${pki_dir}/serviceaccount.crt""
  write-pki-data ${SERVICEACCOUNT_CERT} ${SERVICEACCOUNT_CERT_PATH}

  setvar SERVICEACCOUNT_KEY_PATH = ""${pki_dir}/serviceaccount.key""
  write-pki-data ${SERVICEACCOUNT_KEY} ${SERVICEACCOUNT_KEY_PATH}

  # TODO(mikedanese): remove this when we don't support downgrading to versions
  # < 1.6.
  ln -sf ${APISERVER_SERVER_KEY_PATH} /etc/srv/kubernetes/server.key
  ln -sf ${APISERVER_SERVER_CERT_PATH} /etc/srv/kubernetes/server.cert

  # aggregator / request-header material is optional
  if [[ ! -z "${REQUESTHEADER_CA_CERT:-}" ]] {
    setvar AGGREGATOR_CA_KEY_PATH = ""${pki_dir}/aggr_ca.key""
    write-pki-data ${AGGREGATOR_CA_KEY} ${AGGREGATOR_CA_KEY_PATH}

    setvar REQUESTHEADER_CA_CERT_PATH = ""${pki_dir}/aggr_ca.crt""
    write-pki-data ${REQUESTHEADER_CA_CERT} ${REQUESTHEADER_CA_CERT_PATH}

    setvar PROXY_CLIENT_KEY_PATH = ""${pki_dir}/proxy_client.key""
    write-pki-data ${PROXY_CLIENT_KEY} ${PROXY_CLIENT_KEY_PATH}

    setvar PROXY_CLIENT_CERT_PATH = ""${pki_dir}/proxy_client.crt""
    write-pki-data ${PROXY_CLIENT_CERT} ${PROXY_CLIENT_CERT_PATH}
  }
}

# After the first boot and on upgrade, these files exist on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below.) One exception is if METADATA_CLOBBERS_CONFIG is
# enabled. In that case the basic_auth.csv file will be rewritten to make
# sure it matches the metadata source of truth.
proc create-master-auth {
  # Writes the master's auth artifacts: basic_auth.csv and known_tokens.csv
  # under /etc/srv/kubernetes, the optional /etc/gce.conf cloud-provider
  # config, and the GCP authn/authz/image-review webhook kubeconfig files.
  # All inputs come from KUBE_*/GCP_*/GCE_*/TOKEN_* environment variables,
  # each of which is optional.
  # NOTE(review): every `cat <<< """ ... """` below is immediately followed
  # by a duplicated copy of the redirect and heredoc body ending in a bare
  # EOF line. This looks like a mechanical translation artifact from the
  # original bash heredocs — verify against the source script.
  echo "Creating master auth files"
  local -r auth_dir="/etc/srv/kubernetes"
  local -r basic_auth_csv="${auth_dir}/basic_auth.csv"
  if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]] {
    if [[ -e "${basic_auth_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]] {
      # If METADATA_CLOBBERS_CONFIG is true, we want to rewrite the file
      # completely, because if we're changing KUBE_USER and KUBE_PASSWORD, we
      # have nothing to match on.  The file is replaced just below with
      # append_or_replace_prefixed_line.
      rm ${basic_auth_csv}
    }
    append_or_replace_prefixed_line ${basic_auth_csv} "${KUBE_PASSWORD},${KUBE_USER},"      "admin,system:masters"
  }

  local -r known_tokens_csv="${auth_dir}/known_tokens.csv"
  if [[ -e "${known_tokens_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]] {
    rm ${known_tokens_csv}
  }
  if [[ -n "${KUBE_BEARER_TOKEN:-}" ]] {
    append_or_replace_prefixed_line ${known_tokens_csv} "${KUBE_BEARER_TOKEN},"             "admin,admin,system:masters"
  }
  if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]] {
    append_or_replace_prefixed_line ${known_tokens_csv} "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
  }
  if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]] {
    append_or_replace_prefixed_line ${known_tokens_csv} "${KUBE_SCHEDULER_TOKEN},"          "system:kube-scheduler,uid:system:kube-scheduler"
  }
  if [[ -n "${KUBE_PROXY_TOKEN:-}" ]] {
    append_or_replace_prefixed_line ${known_tokens_csv} "${KUBE_PROXY_TOKEN},"              "system:kube-proxy,uid:kube_proxy"
  }
  if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]] {
    append_or_replace_prefixed_line ${known_tokens_csv} "${NODE_PROBLEM_DETECTOR_TOKEN},"   "system:node-problem-detector,uid:node-problem-detector"
  }
  # Build /etc/gce.conf incrementally; it is removed at the end unless at
  # least one optional section set use_cloud_config=true.
  local use_cloud_config="false"
  cat <<< """ >/etc/gce.conf
[global]
"""
>/etc/gce.conf
[global]
EOF
  if [[ -n "${GCE_API_ENDPOINT:-}" ]] {
    cat <<< """ >>/etc/gce.conf
api-endpoint = ${GCE_API_ENDPOINT}
"""
>>/etc/gce.conf
api-endpoint = ${GCE_API_ENDPOINT}
EOF
  }
  if [[ -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" ]] {
    setvar use_cloud_config = ""true""
    cat <<< """ >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
"""
>>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
EOF
  }
  if [[ -n "${PROJECT_ID:-}" ]] {
    setvar use_cloud_config = ""true""
    cat <<< """ >>/etc/gce.conf
project-id = ${PROJECT_ID}
"""
>>/etc/gce.conf
project-id = ${PROJECT_ID}
EOF
  }
  if [[ -n "${NETWORK_PROJECT_ID:-}" ]] {
    setvar use_cloud_config = ""true""
    cat <<< """ >>/etc/gce.conf
network-project-id = ${NETWORK_PROJECT_ID}
"""
>>/etc/gce.conf
network-project-id = ${NETWORK_PROJECT_ID}
EOF
  }
  if [[ -n "${NODE_NETWORK:-}" ]] {
    setvar use_cloud_config = ""true""
    cat <<< """ >>/etc/gce.conf
network-name = ${NODE_NETWORK}
"""
>>/etc/gce.conf
network-name = ${NODE_NETWORK}
EOF
  }
  if [[ -n "${NODE_SUBNETWORK:-}" ]] {
    setvar use_cloud_config = ""true""
    cat <<< """ >>/etc/gce.conf
subnetwork-name = ${NODE_SUBNETWORK}
"""
>>/etc/gce.conf
subnetwork-name = ${NODE_SUBNETWORK}
EOF
  }
  if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]] {
    setvar use_cloud_config = ""true""
    if [[ -n "${NODE_TAGS:-}" ]] {
      local -r node_tags="${NODE_TAGS}"
    } else {
      local -r node_tags="${NODE_INSTANCE_PREFIX}"
    }
    cat <<< """ >>/etc/gce.conf
node-tags = ${node_tags}
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
"""
>>/etc/gce.conf
node-tags = ${node_tags}
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
  }
  if [[ -n "${MULTIZONE:-}" ]] {
    setvar use_cloud_config = ""true""
    cat <<< """ >>/etc/gce.conf
multizone = ${MULTIZONE}
"""
>>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
  }
  if [[ -n "${GCE_ALPHA_FEATURES:-}" ]] {
    setvar use_cloud_config = ""true""
    cat <<< """ >>/etc/gce.conf
alpha-features = ${GCE_ALPHA_FEATURES}
"""
>>/etc/gce.conf
alpha-features = ${GCE_ALPHA_FEATURES}
EOF
  }
  if [[ -n "${SECONDARY_RANGE_NAME:-}" ]] {
    setvar use_cloud_config = ""true""
    # Fixed: the body previously used ${SECONDARY-RANGE-NAME}, which expands
    # the variable SECONDARY with the literal default "RANGE-NAME". The
    # intended variable is SECONDARY_RANGE_NAME (as tested just above).
    cat <<< """ >> /etc/gce.conf
secondary-range-name = ${SECONDARY_RANGE_NAME}
"""
>> /etc/gce.conf
secondary-range-name = ${SECONDARY_RANGE_NAME}
EOF
  }
  if [[ "${use_cloud_config}" != "true" ]] {
    rm -f /etc/gce.conf
  }

  if [[ -n "${GCP_AUTHN_URL:-}" ]] {
    cat <<< """ >/etc/gcp_authn.config
clusters:
  - name: gcp-authentication-server
    cluster:
      server: ${GCP_AUTHN_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authentication-server
    user: kube-apiserver
  name: webhook
"""
>/etc/gcp_authn.config
clusters:
  - name: gcp-authentication-server
    cluster:
      server: ${GCP_AUTHN_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authentication-server
    user: kube-apiserver
  name: webhook
EOF
  }

  if [[ -n "${GCP_AUTHZ_URL:-}" ]] {
    cat <<< """ >/etc/gcp_authz.config
clusters:
  - name: gcp-authorization-server
    cluster:
      server: ${GCP_AUTHZ_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authorization-server
    user: kube-apiserver
  name: webhook
"""
>/etc/gcp_authz.config
clusters:
  - name: gcp-authorization-server
    cluster:
      server: ${GCP_AUTHZ_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-authorization-server
    user: kube-apiserver
  name: webhook
EOF
  }

if [[ -n "${GCP_IMAGE_VERIFICATION_URL:-}" ]] {
    # This is the config file for the image review webhook.
    cat <<< """ >/etc/gcp_image_review.config
clusters:
  - name: gcp-image-review-server
    cluster:
      server: ${GCP_IMAGE_VERIFICATION_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-image-review-server
    user: kube-apiserver
  name: webhook
"""
>/etc/gcp_image_review.config
clusters:
  - name: gcp-image-review-server
    cluster:
      server: ${GCP_IMAGE_VERIFICATION_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-image-review-server
    user: kube-apiserver
  name: webhook
EOF
    # This is the config for the image review admission controller.
    cat <<< """ >/etc/admission_controller.config
imagePolicy:
  kubeConfigFile: /etc/gcp_image_review.config
  allowTTL: 30
  denyTTL: 30
  retryBackoff: 500
  defaultAllow: true
"""
>/etc/admission_controller.config
imagePolicy:
  kubeConfigFile: /etc/gcp_image_review.config
  allowTTL: 30
  denyTTL: 30
  retryBackoff: 500
  defaultAllow: true
EOF
  }
}

# Write the config for the audit policy.
proc create-master-audit-policy {
  # Writes the apiserver audit policy to the file named by $1.
  # If $2 (a complete policy document) is given it is written verbatim;
  # otherwise a built-in default policy covering the known API groups is
  # generated.
  # NOTE(review): the generated policy body below appears twice — once
  # inside the quoted """ string and once after it, terminated by a bare
  # EOF line. This looks like a mechanical translation artifact from the
  # original bash heredoc; confirm against the source script.
  local -r path="${1}"
  local -r policy="${2:-}"

  if [[ -n "${policy}" ]] {
    # Caller supplied a full policy: write it and skip the default below.
    echo ${policy} > "${path}"
    return
  }

  # Known api groups
  local -r known_apis='
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "settings.k8s.io"
      - group: "storage.k8s.io"'

  cat <<< """ >"${path}"
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
    users: ["system:unsecured"]
    namespaces: ["kube-system"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["configmaps"]
  - level: None
    users: ["kubelet"] # legacy kubelet identity
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    userGroups: ["system:nodes"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    users:
      - system:kube-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["endpoints"]
  - level: None
    users: ["system:apiserver"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
  # Don't log HPA fetching metrics.
  - level: None
    users:
      - system:kube-controller-manager
    verbs: ["get", "list"]
    resources:
      - group: "metrics.k8s.io"

  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*

  # Don't log events requests.
  - level: None
    resources:
      - group: "" # core
        resources: ["events"]

  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"
  - level: Request
    userGroups: ["system:nodes"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"

  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    users: ["system:serviceaccount:kube-system:namespace-controller"]
    verbs: ["deletecollection"]
    omitStages:
      - "RequestReceived"

  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    resources:
      - group: "" # core
        resources: ["secrets", "configmaps"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    omitStages:
      - "RequestReceived"
  # Get repsonses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - "RequestReceived"
"""
>"${path}"
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
    users: ["system:unsecured"]
    namespaces: ["kube-system"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["configmaps"]
  - level: None
    users: ["kubelet"] # legacy kubelet identity
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    userGroups: ["system:nodes"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    users:
      - system:kube-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["endpoints"]
  - level: None
    users: ["system:apiserver"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
  # Don't log HPA fetching metrics.
  - level: None
    users:
      - system:kube-controller-manager
    verbs: ["get", "list"]
    resources:
      - group: "metrics.k8s.io"

  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*

  # Don't log events requests.
  - level: None
    resources:
      - group: "" # core
        resources: ["events"]

  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"
  - level: Request
    userGroups: ["system:nodes"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"

  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    users: ["system:serviceaccount:kube-system:namespace-controller"]
    verbs: ["deletecollection"]
    omitStages:
      - "RequestReceived"

  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    resources:
      - group: "" # core
        resources: ["secrets", "configmaps"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    omitStages:
      - "RequestReceived"
  # Get repsonses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - "RequestReceived"
EOF
}

# Writes the configuration file used by the webhook advanced auditing backend.
proc create-master-audit-webhook-config {
  # Writes a kubeconfig for the audit webhook backend to the file named by
  # $1, pointing at ${GCP_AUDIT_URL}. No-op when GCP_AUDIT_URL is unset.
  # NOTE(review): the kubeconfig body appears twice (translation artifact,
  # see the other heredocs in this file); verify against the bash source.
  local -r path="${1}"

  if [[ -n "${GCP_AUDIT_URL:-}" ]] {
    # The webhook config file is a kubeconfig file describing the webhook endpoint.
    cat <<< """ >"${path}"
clusters:
  - name: gcp-audit-server
    cluster:
      server: ${GCP_AUDIT_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-audit-server
    user: kube-apiserver
  name: webhook
"""
>"${path}"
clusters:
  - name: gcp-audit-server
    cluster:
      server: ${GCP_AUDIT_URL}
users:
  - name: kube-apiserver
    user:
      auth-provider:
        name: gcp
current-context: webhook
contexts:
- context:
    cluster: gcp-audit-server
    user: kube-apiserver
  name: webhook
EOF
  }
}

# Arg 1: the IP address of the API server
proc create-kubelet-kubeconfig {
  # Writes /var/lib/kubelet/bootstrap-kubeconfig using cert/key paths from
  # KUBELET_CERT_PATH, KUBELET_KEY_PATH and CA_CERT_BUNDLE_PATH.
  # $1: the API server address; exits 1 if empty.
  # NOTE(review): the kubeconfig body appears twice (translation artifact,
  # see the other heredocs in this file); verify against the bash source.
  local apiserver_address="${1}"
  if [[ -z "${apiserver_address}" ]] {
    echo "Must provide API server address to create Kubelet kubeconfig file!"
    exit 1
  }
  echo "Creating kubelet kubeconfig file"
  cat <<< """ >/var/lib/kubelet/bootstrap-kubeconfig
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate: ${KUBELET_CERT_PATH}
    client-key: ${KUBELET_KEY_PATH}
clusters:
- name: local
  cluster:
    server: https://${apiserver_address}
    certificate-authority: ${CA_CERT_BUNDLE_PATH}
contexts:
- context:
    cluster: local
    user: kubelet
  name: service-account-context
current-context: service-account-context
"""
>/var/lib/kubelet/bootstrap-kubeconfig
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate: ${KUBELET_CERT_PATH}
    client-key: ${KUBELET_KEY_PATH}
clusters:
- name: local
  cluster:
    server: https://${apiserver_address}
    certificate-authority: ${CA_CERT_BUNDLE_PATH}
contexts:
- context:
    cluster: local
    user: kubelet
  name: service-account-context
current-context: service-account-context
EOF
}

# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY
# to generate a kubeconfig file for the kubelet to securely connect to the apiserver.
# Set REGISTER_MASTER_KUBELET to true if kubelet on the master node
# should register to the apiserver.
proc create-master-kubelet-auth {
  # Only configure the kubelet on the master if the required variables are
  # set in the environment.
  if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]] {
    # NOTE(review): `setvar X = ""true""` appears throughout this file and
    # presumably assigns the string "true" — the doubled quotes look like a
    # translation artifact; verify against the original bash (`X="true"`).
    setvar REGISTER_MASTER_KUBELET = ""true""
    create-kubelet-kubeconfig ${KUBELET_APISERVER}
  }
}

proc create-kubeproxy-user-kubeconfig {
  # Writes /var/lib/kube-proxy/kubeconfig, authenticating with
  # KUBE_PROXY_TOKEN against the CA bundle in CA_CERT_BUNDLE.
  # NOTE(review): the kubeconfig body appears twice (translation artifact,
  # see the other heredocs in this file); verify against the bash source.
  echo "Creating kube-proxy user kubeconfig file"
  cat <<< """ >/var/lib/kube-proxy/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${CA_CERT_BUNDLE}
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
"""
>/var/lib/kube-proxy/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${CA_CERT_BUNDLE}
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
EOF
}

proc create-kubecontrollermanager-kubeconfig {
  # Writes /etc/srv/kubernetes/kube-controller-manager/kubeconfig,
  # authenticating with KUBE_CONTROLLER_MANAGER_TOKEN against
  # https://localhost:443 (TLS verification skipped).
  # NOTE(review): the kubeconfig body appears twice (translation artifact,
  # see the other heredocs in this file); verify against the bash source.
  echo "Creating kube-controller-manager kubeconfig file"
  mkdir -p /etc/srv/kubernetes/kube-controller-manager
  cat <<< """ >/etc/srv/kubernetes/kube-controller-manager/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-controller-manager
  user:
    token: ${KUBE_CONTROLLER_MANAGER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-controller-manager
  name: service-account-context
current-context: service-account-context
"""
>/etc/srv/kubernetes/kube-controller-manager/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-controller-manager
  user:
    token: ${KUBE_CONTROLLER_MANAGER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-controller-manager
  name: service-account-context
current-context: service-account-context
EOF
}

proc create-kubescheduler-kubeconfig {
  # Writes /etc/srv/kubernetes/kube-scheduler/kubeconfig, authenticating
  # with KUBE_SCHEDULER_TOKEN against https://localhost:443 (TLS
  # verification skipped).
  # NOTE(review): the kubeconfig body appears twice (translation artifact,
  # see the other heredocs in this file); verify against the bash source.
  echo "Creating kube-scheduler kubeconfig file"
  mkdir -p /etc/srv/kubernetes/kube-scheduler
  cat <<< """ >/etc/srv/kubernetes/kube-scheduler/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
  user:
    token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-scheduler
  name: kube-scheduler
current-context: kube-scheduler
"""
>/etc/srv/kubernetes/kube-scheduler/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
  user:
    token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-scheduler
  name: kube-scheduler
current-context: kube-scheduler
EOF
}

proc create-node-problem-detector-kubeconfig {
  # Writes /var/lib/node-problem-detector/kubeconfig, authenticating with
  # NODE_PROBLEM_DETECTOR_TOKEN against the CA data in CA_CERT.
  # NOTE(review): the kubeconfig body appears twice (translation artifact,
  # see the other heredocs in this file); verify against the bash source.
  echo "Creating node-problem-detector kubeconfig file"
  mkdir -p /var/lib/node-problem-detector
  cat <<< """ >/var/lib/node-problem-detector/kubeconfig
apiVersion: v1
kind: Config
users:
- name: node-problem-detector
  user:
    token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${CA_CERT}
contexts:
- context:
    cluster: local
    user: node-problem-detector
  name: service-account-context
current-context: service-account-context
"""
>/var/lib/node-problem-detector/kubeconfig
apiVersion: v1
kind: Config
users:
- name: node-problem-detector
  user:
    token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: local
  cluster:
    certificate-authority-data: ${CA_CERT}
contexts:
- context:
    cluster: local
    user: node-problem-detector
  name: service-account-context
current-context: service-account-context
EOF
}

proc create-master-etcd-auth {
  # Install etcd's mutual-TLS material when all three pieces of PKI data
  # (CA cert, peer key, peer cert) are present in the environment. The CA
  # and peer certs are base64-encoded gzip data; the peer key is plain
  # base64.
  if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]] {
    local -r out_dir="/etc/srv/kubernetes"
    echo ${ETCD_CA_CERT} | base64 --decode | gunzip > "${out_dir}/etcd-ca.crt"
    echo ${ETCD_PEER_KEY} | base64 --decode > "${out_dir}/etcd-peer.key"
    echo ${ETCD_PEER_CERT} | base64 --decode | gunzip > "${out_dir}/etcd-peer.crt"
  }
}

proc assemble-docker-flags {
  # Builds DOCKER_OPTS in /etc/default/docker from cluster env vars, adds
  # systemd drop-ins for network-checkpoint cleanup and TasksMax, then
  # restarts docker.
  # NOTE(review): `setvar docker_opts = "" ...""` lines look like translated
  # bash `docker_opts+=" ..."` appends — verify the translation preserved
  # append semantics rather than overwriting the variable.
  echo "Assemble docker command line flags"
  local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false"
  if [[ "${TEST_CLUSTER:-}" == "true" ]] {
    setvar docker_opts = "" --log-level=debug""
  } else {
    setvar docker_opts = "" --log-level=warn""
  }
  local use_net_plugin="true"
  if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]] {
    # set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range
    setvar docker_opts = "" --bip=169.254.123.1/24""
  } else {
    setvar use_net_plugin = ""false""
    setvar docker_opts = "" --bridge=cbr0""
  }

  # Decide whether to enable a docker registry mirror. This is taken from
  # the "kube-env" metadata value.
  if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]] {
    echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}"
    setvar docker_opts = "" --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}""
  }

  # Configure docker logging
  setvar docker_opts = "" --log-driver=${DOCKER_LOG_DRIVER:-json-file}""
  setvar docker_opts = "" --log-opt=max-size=${DOCKER_LOG_MAX_SIZE:-10m}""
  setvar docker_opts = "" --log-opt=max-file=${DOCKER_LOG_MAX_FILE:-5}""

  echo "DOCKER_OPTS=\"${docker_opts} ${EXTRA_DOCKER_OPTS:-}\"" > /etc/default/docker

  if [[ "${use_net_plugin}" == "true" ]] {
    # If using a network plugin, extend the docker configuration to always remove
    # the network checkpoint to avoid corrupt checkpoints.
    # (https://github.com/docker/docker/issues/18283).
    # NOTE(review): "checkpiont" typo below is in a runtime log message;
    # left as-is to preserve output byte-for-byte.
    echo "Extend the docker.service configuration to remove the network checkpiont"
    mkdir -p /etc/systemd/system/docker.service.d
    cat <<< """ >/etc/systemd/system/docker.service.d/01network.conf
[Service]
ExecStartPre=/bin/sh -x -c "rm -rf /var/lib/docker/network"
"""
>/etc/systemd/system/docker.service.d/01network.conf
[Service]
ExecStartPre=/bin/sh -x -c "rm -rf /var/lib/docker/network"
EOF
  }

  # Ensure TasksMax is sufficient for docker.
  # (https://github.com/kubernetes/kubernetes/issues/51977)
  echo "Extend the docker.service configuration to set a higher pids limit"
  mkdir -p /etc/systemd/system/docker.service.d
  cat <<< """ >/etc/systemd/system/docker.service.d/02tasksmax.conf
[Service]
TasksMax=infinity
"""
>/etc/systemd/system/docker.service.d/02tasksmax.conf
[Service]
TasksMax=infinity
EOF

    systemctl daemon-reload
    echo "Docker command line is updated. Restart docker to pick it up"
    systemctl restart docker
}

# This function assembles the kubelet systemd service file and starts it
# using systemctl.
proc start-kubelet {
  # Assembles the kubelet flag set into /etc/default/kubelet, writes the
  # kubelet systemd unit, flushes the iptables nat table, and starts the
  # service. Behavior branches on KUBERNETES_MASTER (master vs node flags)
  # and many optional env vars.
  # NOTE(review): `setvar flags = "" ...""` lines look like translated bash
  # `flags+=" ..."` appends — verify the translation preserved append
  # semantics rather than overwriting the variable.
  echo "Start kubelet"

  local -r kubelet_cert_dir="/var/lib/kubelet/pki/"
  mkdir -p ${kubelet_cert_dir}

  local kubelet_bin="${KUBE_HOME}/bin/kubelet"
  local -r version="$("${kubelet_bin}" --version=true | cut -f2 -d " ")"
  local -r builtin_kubelet="/usr/bin/kubelet"
  if [[ "${TEST_CLUSTER:-}" == "true" ]] {
    # Determine which binary to use on test clusters. We use the built-in
    # version only if the downloaded version is the same as the built-in
    # version. This allows GCI to run some of the e2e tests to qualify the
    # built-in kubelet.
    if [[ -x "${builtin_kubelet}" ]] {
      local -r builtin_version="$("${builtin_kubelet}"  --version=true | cut -f2 -d " ")"
      if [[ "${builtin_version}" == "${version}" ]] {
        setvar kubelet_bin = "${builtin_kubelet}"
      }
    }
  }
  echo "Using kubelet binary at ${kubelet_bin}"
  local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}"
  setvar flags = "" --allow-privileged=true""
  setvar flags = "" --cgroup-root=/""
  setvar flags = "" --cloud-provider=gce""
  setvar flags = "" --cluster-dns=${DNS_SERVER_IP}""
  setvar flags = "" --cluster-domain=${DNS_DOMAIN}""
  setvar flags = "" --pod-manifest-path=/etc/kubernetes/manifests""
  setvar flags = "" --experimental-mounter-path=${CONTAINERIZED_MOUNTER_HOME}/mounter""
  setvar flags = "" --experimental-check-node-capabilities-before-mount=true""
  setvar flags = "" --cert-dir=${kubelet_cert_dir}""

  if [[ -n "${KUBELET_PORT:-}" ]] {
    setvar flags = "" --port=${KUBELET_PORT}""
  }
  if [[ "${KUBERNETES_MASTER:-}" == "true" ]] {
    setvar flags = "" ${MASTER_KUBELET_TEST_ARGS:-}""
    setvar flags = "" --enable-debugging-handlers=false""
    setvar flags = "" --hairpin-mode=none""
    if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]] {
      #TODO(mikedanese): allow static pods to start before creating a client
      #flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
      #flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
      setvar flags = "" --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig""
      setvar flags = "" --register-schedulable=false""
    } else {
      # Standalone mode (not widely used?)
      setvar flags = "" --pod-cidr=${MASTER_IP_RANGE}""
    }
  } else { # For nodes
    setvar flags = "" ${NODE_KUBELET_TEST_ARGS:-}""
    setvar flags = "" --enable-debugging-handlers=true""
    setvar flags = "" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig""
    setvar flags = "" --kubeconfig=/var/lib/kubelet/kubeconfig""
    if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \
       [[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \
       [[ "${HAIRPIN_MODE:-}" == "none" ]] {
      setvar flags = "" --hairpin-mode=${HAIRPIN_MODE}""
    }
    setvar flags = "" --anonymous-auth=false --authorization-mode=Webhook --client-ca-file=${CA_CERT_BUNDLE_PATH}""
  }
  # Network plugin
  if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]] {
    setvar flags = "" --cni-bin-dir=/home/kubernetes/bin""
    if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]] {
      # Calico uses CNI always.
      if [[ "${KUBERNETES_PRIVATE_MASTER:-}" == "true" ]] {
        setvar flags = "" --network-plugin=${NETWORK_PROVIDER}""
      } else {
        setvar flags = "" --network-plugin=cni""
      }
    } else {
      # Otherwise use the configured value.
      setvar flags = "" --network-plugin=${NETWORK_PROVIDER}""
    }
  }
  if [[ -n "${NON_MASQUERADE_CIDR:-}" ]] {
    setvar flags = "" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}""
  }
  # FlexVolume plugin
  if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]] {
    setvar flags = "" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}""
  }
  if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]] {
    setvar flags = "" --manifest-url=${MANIFEST_URL}""
    setvar flags = "" --manifest-url-header=${MANIFEST_URL_HEADER}""
  }
  if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]] {
    setvar flags = "" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}""
  }
  local node_labels=""
  if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${KUBERNETES_MASTER:-}" != "true" ]] {
    # Add kube-proxy daemonset label to node to avoid situation during cluster
    # upgrade/downgrade when there are two instances of kube-proxy running on a node.
    setvar node_labels = ""beta.kubernetes.io/kube-proxy-ds-ready=true""
  }
  if [[ -n "${NODE_LABELS:-}" ]] {
    setvar node_labels = ""${node_labels:+${node_labels},}${NODE_LABELS}""
  }
  if [[ -n "${node_labels:-}" ]] {
    setvar flags = "" --node-labels=${node_labels}""
  }
  if [[ -n "${NODE_TAINTS:-}" ]] {
    setvar flags = "" --register-with-taints=${NODE_TAINTS}""
  }
  if [[ -n "${EVICTION_HARD:-}" ]] {
    setvar flags = "" --eviction-hard=${EVICTION_HARD}""
  }
  if [[ -n "${FEATURE_GATES:-}" ]] {
    setvar flags = "" --feature-gates=${FEATURE_GATES}""
  }
  if [[ -n "${ROTATE_CERTIFICATES:-}" ]] {
    setvar flags = "" --rotate-certificates=true""
  }

  local -r kubelet_env_file="/etc/default/kubelet"
  echo "KUBELET_OPTS=\"${flags}\"" > "${kubelet_env_file}"

  # Write the systemd service file for kubelet.
  # NOTE(review): the unit body appears twice, and the first copy renders
  # the escaped $KUBELET_OPTS as '$'KUBELET_OPTS while the duplicate uses
  # \$KUBELET_OPTS — a translation artifact; verify against the bash source.
  cat <<< """ >/etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes kubelet
Requires=network-online.target
After=network-online.target

[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} '$'KUBELET_OPTS

[Install]
WantedBy=multi-user.target
"""
>/etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes kubelet
Requires=network-online.target
After=network-online.target

[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} \$KUBELET_OPTS

[Install]
WantedBy=multi-user.target
EOF

  # Flush iptables nat table
  iptables -t nat -F || true

  systemctl start kubelet.service
}

# This function assembles the node problem detector systemd service file and
# starts it using systemctl.
proc start-node-problem-detector {
  # Assembles node-problem-detector flags (kernel + docker monitors, an
  # apiserver override pointing at the NPD kubeconfig, and a listen port),
  # writes its systemd unit, and starts the service.
  # NOTE(review): `setvar flags = "" ...""` lines look like translated bash
  # `flags+=" ..."` appends, and the unit body appears twice — translation
  # artifacts; verify against the bash source.
  echo "Start node problem detector"
  local -r npd_bin="${KUBE_HOME}/bin/node-problem-detector"
  local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json"
  local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json"
  echo "Using node problem detector binary at ${npd_bin}"
  local flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}"
  setvar flags = "" --logtostderr""
  setvar flags = "" --system-log-monitors=${km_config},${dm_config}""
  setvar flags = "" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig""
  local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256}
  setvar flags = "" --port=${npd_port}""

  # Write the systemd service file for node problem detector.
  cat <<< """ >/etc/systemd/system/node-problem-detector.service
[Unit]
Description=Kubernetes node problem detector
Requires=network-online.target
After=network-online.target

[Service]
Restart=always
RestartSec=10
ExecStart=${npd_bin} ${flags}

[Install]
WantedBy=multi-user.target
"""
>/etc/systemd/system/node-problem-detector.service
[Unit]
Description=Kubernetes node problem detector
Requires=network-online.target
After=network-online.target

[Service]
Restart=always
RestartSec=10
ExecStart=${npd_bin} ${flags}

[Install]
WantedBy=multi-user.target
EOF

  systemctl start node-problem-detector.service
}

# Create the log file and set its properties.
#
# $1 is the file to create.
proc prepare-log-file {
  # Create the log file named by $1 (if missing) and make it a root-owned,
  # world-readable (0644) file.
  local -r logfile=$1
  touch ${logfile}
  chmod 644 ${logfile}
  chown root:root ${logfile}
}

# Prepares parameters for kube-proxy manifest.
# $1 source path of kube-proxy manifest.
proc prepare-kube-proxy-manifest-variables {
  # Fills in the {{placeholder}} template variables of the kube-proxy
  # manifest named by $1, in place, via a series of sed substitutions.
  # NOTE(review): `setvar params = "" ...""` lines look like translated bash
  # `params+=" ..."` appends — verify the translation preserved append
  # semantics rather than overwriting the variable.
  local -r src_file=$1;

  remove-salt-config-comments ${src_file}

  local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig"
  local kube_docker_registry="gcr.io/google_containers"
  if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]] {
    setvar kube_docker_registry = ${KUBE_DOCKER_REGISTRY}
  }
  local -r kube_proxy_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-proxy.docker_tag)
  local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
  local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}"
  if [[ -n "${FEATURE_GATES:-}" ]] {
    setvar params = "" --feature-gates=${FEATURE_GATES}""
  }
  setvar params = "" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s""
  if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]] {
    setvar params = "" ${KUBEPROXY_TEST_ARGS}""
  }
  local container_env=""
  local kube_cache_mutation_detector_env_name=""
  local kube_cache_mutation_detector_env_value=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]] {
    setvar container_env = ""env:""
    setvar kube_cache_mutation_detector_env_name = ""- name: KUBE_CACHE_MUTATION_DETECTOR""
    setvar kube_cache_mutation_detector_env_value = ""value: \"${ENABLE_CACHE_MUTATION_DETECTOR}"\""
  }
  local pod_priority=""
  if [[ "${ENABLE_POD_PRIORITY:-}" == "true" ]] {
    setvar pod_priority = ""priorityClassName: system-node-critical""
  }
  # Substitute each template placeholder with the value assembled above.
  sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file}
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file}
  sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file}
  sed -i -e "s@{{params}}@${params}@g" ${src_file}
  sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
  sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file}
  sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file}
  sed -i -e "s@{{pod_priority}}@${pod_priority}@g" ${src_file}
  sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
  sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
  sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file}
  if [[ -n "${CLUSTER_IP_RANGE:-}" ]] {
    sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" ${src_file}
  }
}

# Starts kube-proxy static pod.
proc start-kube-proxy {
  # Render the kube-proxy manifest and install it as a static pod.
  echo "Start kube-proxy static pod"
  prepare-log-file /var/log/kube-proxy.log
  local -r manifest="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest"
  prepare-kube-proxy-manifest-variables ${manifest}
  cp ${manifest} /etc/kubernetes/manifests
}

# Replaces the variables in the etcd manifest file with the real values, and then
# copy the file to the manifest dir
# $1: value for variable 'suffix'
# $2: value for variable 'port'
# $3: value for variable 'server_port'
# $4: value for variable 'cpulimit'
# $5: pod name, which should be either etcd or etcd-events
proc prepare-etcd-manifest {
  # See the header comment above for the meaning of $1..$5.
  local host_name=$(hostname)
  local etcd_cluster=""
  local cluster_state="new"
  local etcd_protocol="http"
  local etcd_creds=""

  if [[ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]] {
    setvar cluster_state = "${INITIAL_ETCD_CLUSTER_STATE}"
  }
  # Switch to TLS peer communication only when the full set of CA/peer
  # credentials is present.
  if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]] {
    setvar etcd_creds = "" --peer-trusted-ca-file /etc/srv/kubernetes/etcd-ca.crt --peer-cert-file /etc/srv/kubernetes/etcd-peer.crt --peer-key-file /etc/srv/kubernetes/etcd-peer.key -peer-client-cert-auth ""
    setvar etcd_protocol = ""https""
  }

  # Build the member list from the comma-separated INITIAL_ETCD_CLUSTER
  # (falling back to this host). NOTE(review): both setvar statements in
  # this loop assign rather than append, so etcd_cluster ends up holding
  # only the last host (upstream bash used +=) — verify translator intent.
  for host in $(echo "${INITIAL_ETCD_CLUSTER:-${host_name}}" | tr "," "\n") {
    setvar etcd_host = ""etcd-${host}=${etcd_protocol}://${host}:$3""
    if [[ -n "${etcd_cluster}" ]] {
      setvar etcd_cluster = "",""
    }
    setvar etcd_cluster = "${etcd_host}"
  }

  # Work on a copy in /tmp named after the pod ($5), then move it into
  # the manifest directory at the end.
  local -r temp_file="/tmp/$5"
  cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" ${temp_file}
  remove-salt-config-comments ${temp_file}
  sed -i -e "s@{{ *suffix *}}@$1@g" ${temp_file}
  sed -i -e "s@{{ *port *}}@$2@g" ${temp_file}
  sed -i -e "s@{{ *server_port *}}@$3@g" ${temp_file}
  sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" ${temp_file}
  sed -i -e "s@{{ *hostname *}}@$host_name@g" ${temp_file}
  sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" ${temp_file}
  sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" ${temp_file}
  # Get default storage backend from manifest file.
  local -r default_storage_backend=$(cat "${temp_file}" | \
    grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \
    sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
  if [[ -n "${STORAGE_BACKEND:-}" ]] {
    sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" ${temp_file}
  } else {
    sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" ${temp_file}
  }
  # etcd3 gets an explicit 4GiB backend quota; other backends drop the flag.
  if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]] {
    sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=4294967296@g" ${temp_file}
  } else {
    sed -i -e "s@{{ *quota_bytes *}}@@g" ${temp_file}
  }
  sed -i -e "s@{{ *cluster_state *}}@$cluster_state@g" ${temp_file}
  # For each pillar.get placeholder: use the env override when set,
  # otherwise keep the template's embedded default (captured as \1).
  if [[ -n "${ETCD_IMAGE:-}" ]] {
    sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@${ETCD_IMAGE}@g" ${temp_file}
  } else {
    sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@\1@g" ${temp_file}
  }
  if [[ -n "${ETCD_DOCKER_REPOSITORY:-}" ]] {
    sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@${ETCD_DOCKER_REPOSITORY}@g" ${temp_file}
  } else {
    sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@\1@g" ${temp_file}
  }
  sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" ${temp_file}
  sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" ${temp_file}
  if [[ -n "${ETCD_VERSION:-}" ]] {
    sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" ${temp_file}
  } else {
    sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@\1@g" ${temp_file}
  }
  # Replace the volume host path.
  sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" ${temp_file}
  mv ${temp_file} /etc/kubernetes/manifests
}

proc start-etcd-empty-dir-cleanup-pod {
  # Install the etcd-empty-dir-cleanup static pod manifest.
  local -r manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml"
  cp "${manifest}" "/etc/kubernetes/manifests"
}

# Starts etcd server pod (and etcd-events pod if needed).
# More specifically, it prepares dirs and files, sets the variable value
# in the manifests, and copies them to /etc/kubernetes/manifests.
proc start-etcd-servers {
  # Remove any artifacts of a previous host-level etcd installation,
  # then render and install the etcd and etcd-events static pod manifests.
  echo "Start etcd pods"
  if test -d /etc/etcd {
    rm -rf /etc/etcd
  }
  if test -e /etc/default/etcd {
    rm -f /etc/default/etcd
  }
  if test -e /etc/systemd/system/etcd.service {
    rm -f /etc/systemd/system/etcd.service
  }
  if test -e /etc/init.d/etcd {
    rm -f /etc/init.d/etcd
  }
  prepare-log-file /var/log/etcd.log
  prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest"

  prepare-log-file /var/log/etcd-events.log
  prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest"
}

# Calculates the following variables based on env variables, which will be used
# by the manifests of several kube-master components.
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
#   DOCKER_REGISTRY
proc compute-master-manifest-variables {
  # Default to no cloud-config; the manifests substitute empty strings.
  setvar CLOUD_CONFIG_OPT = """"
  setvar CLOUD_CONFIG_VOLUME = """"
  setvar CLOUD_CONFIG_MOUNT = """"
  # When /etc/gce.conf exists, wire it into the master pods as a read-only
  # hostPath mount plus the matching --cloud-config flag.
  if [[ -f /etc/gce.conf ]] {
    setvar CLOUD_CONFIG_OPT = ""--cloud-config=/etc/gce.conf""
    setvar CLOUD_CONFIG_VOLUME = ""{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}},""
    setvar CLOUD_CONFIG_MOUNT = ""{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},""
  }
  setvar DOCKER_REGISTRY = ""gcr.io/google_containers""
  if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]] {
    setvar DOCKER_REGISTRY = "${KUBE_DOCKER_REGISTRY}"
  }
}

# A helper function that bind mounts kubelet dirs for running mount in a chroot
proc prepare-mounter-rootfs {
  echo "Prepare containerized mounter"
  # Bind-mount the mounter home onto itself so it can be remounted exec.
  mount --bind ${CONTAINERIZED_MOUNTER_HOME} ${CONTAINERIZED_MOUNTER_HOME}
  mount -o remount,exec ${CONTAINERIZED_MOUNTER_HOME}
  setvar CONTAINERIZED_MOUNTER_ROOTFS = ""${CONTAINERIZED_MOUNTER_HOME}/rootfs""
  # Expose the kubelet dir inside the chroot with shared mount propagation.
  mount --rbind /var/lib/kubelet/ "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
  mount --make-rshared "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
  # Read-only views of /proc and /dev for tools running in the chroot.
  mount --bind -o ro /proc "${CONTAINERIZED_MOUNTER_ROOTFS}/proc"
  mount --bind -o ro /dev "${CONTAINERIZED_MOUNTER_ROOTFS}/dev"
  cp /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/"
}

# A helper function for removing salt configuration and comments from a file.
# This is mainly for preparing a manifest file.
#
# $1: Full path of the file to manipulate
proc remove-salt-config-comments {
  # Strip salt template directives ({# ... #} / {% ... %} lines) and
  # whole-line comments from $1 in a single in-place sed pass.
  # NOTE(review): the bracket expressions also match a literal '|'
  # alongside space/tab — presumably unintended upstream; preserved as-is.
  sed -i -e "/^[ |\t]*{[#|%]/d" -e "/^[ |\t]*#/d" $1
}

# Starts kubernetes apiserver.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
#   DOCKER_REGISTRY
proc start-kube-apiserver {
  echo "Start kubernetes api-server"
  prepare-log-file /var/log/kube-apiserver.log
  prepare-log-file /var/log/kube-apiserver-audit.log

  # Calculate variables and assemble the command line.
  # NOTE(review): the 'setvar params = "" ...""' statements throughout
  # this proc assign rather than append to params (upstream bash used
  # params+=) — verify this matches the osh->oil translator's intent.
  local params="${API_SERVER_TEST_LOG_LEVEL:-"--v=2"} ${APISERVER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
  setvar params = "" --address=127.0.0.1""
  setvar params = "" --allow-privileged=true""
  setvar params = "" --cloud-provider=gce""
  setvar params = "" --client-ca-file=${CA_CERT_BUNDLE_PATH}""
  setvar params = "" --etcd-servers=http://127.0.0.1:2379""
  setvar params = "" --etcd-servers-overrides=/events#http://127.0.0.1:4002""
  setvar params = "" --secure-port=443""
  setvar params = "" --tls-cert-file=${APISERVER_SERVER_CERT_PATH}""
  setvar params = "" --tls-private-key-file=${APISERVER_SERVER_KEY_PATH}""
  # Aggregator / request-header auth flags, only when the CA bundle exists
  # and is non-empty (-s).
  if [[ -s "${REQUESTHEADER_CA_CERT_PATH:-}" ]] {
    setvar params = "" --requestheader-client-ca-file=${REQUESTHEADER_CA_CERT_PATH}""
    setvar params = "" --requestheader-allowed-names=aggregator""
    setvar params = "" --requestheader-extra-headers-prefix=X-Remote-Extra-""
    setvar params = "" --requestheader-group-headers=X-Remote-Group""
    setvar params = "" --requestheader-username-headers=X-Remote-User""
    setvar params = "" --proxy-client-cert-file=${PROXY_CLIENT_CERT_PATH}""
    setvar params = "" --proxy-client-key-file=${PROXY_CLIENT_KEY_PATH}""
  }
  setvar params = "" --enable-aggregator-routing=true""
  if [[ -e "${APISERVER_CLIENT_CERT_PATH}" ]] && [[ -e "${APISERVER_CLIENT_KEY_PATH}" ]] {
    setvar params = "" --kubelet-client-certificate=${APISERVER_CLIENT_CERT_PATH}""
    setvar params = "" --kubelet-client-key=${APISERVER_CLIENT_KEY_PATH}""
  }
  if [[ -n "${SERVICEACCOUNT_CERT_PATH:-}" ]] {
    setvar params = "" --service-account-key-file=${SERVICEACCOUNT_CERT_PATH}""
  }
  setvar params = "" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv""
  if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]] {
    setvar params = "" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv""
  }
  if [[ -n "${STORAGE_BACKEND:-}" ]] {
    setvar params = "" --storage-backend=${STORAGE_BACKEND}""
  }
  if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]] {
    setvar params = "" --storage-media-type=${STORAGE_MEDIA_TYPE}""
  }
  if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]] {
    setvar params = "" --request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT_SEC}s""
  }
  if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]] {
    setvar params = "" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}""
  }
  if [[ -n "${NUM_NODES:-}" ]] {
    # If the cluster is large, increase max-requests-inflight limit in apiserver.
    if [[ "${NUM_NODES}" -ge 1000 ]] {
      setvar params = "" --max-requests-inflight=1500 --max-mutating-requests-inflight=500""
    }
    # Set amount of memory available for apiserver based on number of nodes.
    # TODO: Once we start setting proper requests and limits for apiserver
    # we should reuse the same logic here instead of current heuristic.
    setvar params = "" --target-ram-mb="$((${NUM_NODES} * 60))"
  }
  if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]] {
    setvar params = "" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}""
  }
  if [[ -n "${ETCD_QUORUM_READ:-}" ]] {
    setvar params = "" --etcd-quorum-read=${ETCD_QUORUM_READ}""
  }

  # Audit mounts/volumes default to empty; filled in below when auditing
  # is enabled.
  local audit_policy_config_mount=""
  local audit_policy_config_volume=""
  local audit_webhook_config_mount=""
  local audit_webhook_config_volume=""
  if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]] {
    # We currently only support enabling with a fixed path and with built-in log
    # rotation "disabled" (large value) so it behaves like kube-apiserver.log.
    # External log rotation should be set up the same as for kube-apiserver.log.
    setvar params = "" --audit-log-path=/var/log/kube-apiserver-audit.log""
    setvar params = "" --audit-log-maxage=0""
    setvar params = "" --audit-log-maxbackup=0""
    # Lumberjack doesn't offer any way to disable size-based rotation. It also
    # has an in-memory counter that doesn't notice if you truncate the file.
    # 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
    # grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
    # never restarts. Please manually restart apiserver before this time.
    setvar params = "" --audit-log-maxsize=2000000000""
    # Disable AdvancedAuditing enabled by default
    if [[ -z "${FEATURE_GATES:-}" ]] {
      setvar FEATURE_GATES = ""AdvancedAuditing=false""
    } else {
      setvar FEATURE_GATES = ""${FEATURE_GATES},AdvancedAuditing=false""
    }
  } elif [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]] {
    local -r audit_policy_file="/etc/audit_policy.config"
    setvar params = "" --audit-policy-file=${audit_policy_file}""
    # Create the audit policy file, and mount it into the apiserver pod.
    create-master-audit-policy ${audit_policy_file} ${ADVANCED_AUDIT_POLICY:-}
    setvar audit_policy_config_mount = ""{\"name\": \"auditpolicyconfigmount\",\"mountPath\": \"${audit_policy_file}\", \"readOnly\": true},""
    setvar audit_policy_config_volume = ""{\"name\": \"auditpolicyconfigmount\",\"hostPath\": {\"path\": \"${audit_policy_file}\", \"type\": \"FileOrCreate\"}},""

    # ADVANCED_AUDIT_BACKEND may name multiple backends; substring match.
    if [[ "${ADVANCED_AUDIT_BACKEND:-log}" == *"log"* ]] {
      # The advanced audit log backend config matches the basic audit log config.
      setvar params = "" --audit-log-path=/var/log/kube-apiserver-audit.log""
      setvar params = "" --audit-log-maxage=0""
      setvar params = "" --audit-log-maxbackup=0""
      # Lumberjack doesn't offer any way to disable size-based rotation. It also
      # has an in-memory counter that doesn't notice if you truncate the file.
      # 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
      # grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
      # never restarts. Please manually restart apiserver before this time.
      setvar params = "" --audit-log-maxsize=2000000000""
    }
    if [[ "${ADVANCED_AUDIT_BACKEND:-}" == *"webhook"* ]] {
      setvar params = "" --audit-webhook-mode=batch""

      # Create the audit webhook config file, and mount it into the apiserver pod.
      local -r audit_webhook_config_file="/etc/audit_webhook.config"
      setvar params = "" --audit-webhook-config-file=${audit_webhook_config_file}""
      create-master-audit-webhook-config ${audit_webhook_config_file}
      setvar audit_webhook_config_mount = ""{\"name\": \"auditwebhookconfigmount\",\"mountPath\": \"${audit_webhook_config_file}\", \"readOnly\": true},""
      setvar audit_webhook_config_volume = ""{\"name\": \"auditwebhookconfigmount\",\"hostPath\": {\"path\": \"${audit_webhook_config_file}\", \"type\": \"FileOrCreate\"}},""
    }
  }

  if [[ "${ENABLE_APISERVER_LOGS_HANDLER:-}" == "false" ]] {
    setvar params = "" --enable-logs-handler=false""
  }

  local admission_controller_config_mount=""
  local admission_controller_config_volume=""
  local image_policy_webhook_config_mount=""
  local image_policy_webhook_config_volume=""
  if [[ -n "${ADMISSION_CONTROL:-}" ]] {
    setvar params = "" --admission-control=${ADMISSION_CONTROL}""
    if [[ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]] {
      setvar params = "" --admission-control-config-file=/etc/admission_controller.config""
      # Mount the file to configure admission controllers if ImagePolicyWebhook is set.
      setvar admission_controller_config_mount = ""{\"name\": \"admissioncontrollerconfigmount\",\"mountPath\": \"/etc/admission_controller.config\", \"readOnly\": false},""
      setvar admission_controller_config_volume = ""{\"name\": \"admissioncontrollerconfigmount\",\"hostPath\": {\"path\": \"/etc/admission_controller.config\", \"type\": \"FileOrCreate\"}},""
      # Mount the file to configure the ImagePolicyWebhook's webhook.
      setvar image_policy_webhook_config_mount = ""{\"name\": \"imagepolicywebhookconfigmount\",\"mountPath\": \"/etc/gcp_image_review.config\", \"readOnly\": false},""
      setvar image_policy_webhook_config_volume = ""{\"name\": \"imagepolicywebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_image_review.config\", \"type\": \"FileOrCreate\"}},""
    }
  }

  if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]] {
    setvar params = "" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}""
  }
  if [[ -n "${RUNTIME_CONFIG:-}" ]] {
    setvar params = "" --runtime-config=${RUNTIME_CONFIG}""
  }
  if [[ -n "${FEATURE_GATES:-}" ]] {
    setvar params = "" --feature-gates=${FEATURE_GATES}""
  }
  # SSH-tunnel (proxy) setup: advertise the VM's external IP when the
  # GCP token/network variables are all present; otherwise fall back to
  # MASTER_ADVERTISE_ADDRESS when given.
  if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]] {
    local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
    setvar params = "" --advertise-address=${vm_external_ip}""
    setvar params = "" --ssh-user=${PROXY_SSH_USER}""
    setvar params = "" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile""
  } elif test -n ${MASTER_ADVERTISE_ADDRESS:-} {
    setvar params = ""${params} --advertise-address=${MASTER_ADVERTISE_ADDRESS}""
  }

  local webhook_authn_config_mount=""
  local webhook_authn_config_volume=""
  if [[ -n "${GCP_AUTHN_URL:-}" ]] {
    setvar params = "" --authentication-token-webhook-config-file=/etc/gcp_authn.config""
    setvar webhook_authn_config_mount = ""{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"/etc/gcp_authn.config\", \"readOnly\": false},""
    setvar webhook_authn_config_volume = ""{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authn.config\", \"type\": \"FileOrCreate\"}},""
  }


  local authorization_mode="Node,RBAC"
  local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"

  # Enable ABAC mode unless the user explicitly opts out with ENABLE_LEGACY_ABAC=false
  if [[ "${ENABLE_LEGACY_ABAC:-}" != "false" ]] {
    echo "Warning: Enabling legacy ABAC policy. All service accounts will have superuser API access. Set ENABLE_LEGACY_ABAC=false to disable this."
    # Create the ABAC file if it doesn't exist yet, or if we have a KUBE_USER set (to ensure the right user is given permissions)
    if [[ -n "${KUBE_USER:-}" || ! -e /etc/srv/kubernetes/abac-authz-policy.jsonl ]] {
      local -r abac_policy_json="${src_dir}/abac-authz-policy.jsonl"
      remove-salt-config-comments ${abac_policy_json}
      if [[ -n "${KUBE_USER:-}" ]] {
        sed -i -e "s/{{kube_user}}/${KUBE_USER}/g" ${abac_policy_json}
      } else {
        sed -i -e "/{{kube_user}}/d" ${abac_policy_json}
      }
      cp ${abac_policy_json} /etc/srv/kubernetes/
    }

    setvar params = "" --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl""
    setvar authorization_mode = "",ABAC""
  }

  local webhook_config_mount=""
  local webhook_config_volume=""
  if [[ -n "${GCP_AUTHZ_URL:-}" ]] {
    setvar authorization_mode = "",Webhook""
    setvar params = "" --authorization-webhook-config-file=/etc/gcp_authz.config""
    setvar webhook_config_mount = ""{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false},""
    setvar webhook_config_volume = ""{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\", \"type\": \"FileOrCreate\"}},""
  }
  setvar params = "" --authorization-mode=${authorization_mode}""

  # Optional debug env vars injected into the container spec as JSON.
  local container_env=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]] {
    setvar container_env = ""\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}"\""
  }
  if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]] {
    if [[ -n "${container_env}" ]] {
      setvar container_env = ""${container_env}, ""
    }
    setvar container_env = ""\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}"\""
  }
  if [[ -n "${container_env}" ]] {
    setvar container_env = ""\"env\":[{${container_env}}],""
  }

  # Encryption-at-rest: decode the base64 provider config (and optional
  # Cloud KMS config) onto disk and point the apiserver at it.
  if [[ -n "${ENCRYPTION_PROVIDER_CONFIG:-}" ]] {
    local encryption_provider_config_path="/etc/srv/kubernetes/encryption-provider-config.yml"
    if [[ -n "${GOOGLE_CLOUD_KMS_CONFIG_FILE_NAME:-}" && -n "${GOOGLE_CLOUD_KMS_CONFIG:-}" ]] {
        echo ${GOOGLE_CLOUD_KMS_CONFIG} | base64 --decode > "${GOOGLE_CLOUD_KMS_CONFIG_FILE_NAME}"
    }

    echo ${ENCRYPTION_PROVIDER_CONFIG} | base64 --decode > "${encryption_provider_config_path}"
    setvar params = "" --experimental-encryption-provider-config=${encryption_provider_config_path}""
  }

  # NOTE(review): src_file is assigned via setvar (global scope), unlike
  # the 'local -r src_file' used by sibling procs — verify intent.
  setvar src_file = ""${src_dir}/kube-apiserver.manifest""
  remove-salt-config-comments ${src_file}
  # Evaluate variables.
  local -r kube_apiserver_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)
  sed -i -e "s@{{params}}@${params}@g" ${src_file}
  sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" ${src_file}
  sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" ${src_file}
  sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" ${src_file}
  sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" ${src_file}
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" ${src_file}
  sed -i -e "s@{{pillar\['kube-apiserver_docker_tag'\]}}@${kube_apiserver_docker_tag}@g" ${src_file}
  sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" ${src_file}
  sed -i -e "s@{{secure_port}}@443@g" ${src_file}
  # NOTE(review): dead substitution — the previous line already replaced
  # every {{secure_port}} occurrence globally, so nothing matches here.
  sed -i -e "s@{{secure_port}}@8080@g" ${src_file}
  sed -i -e "s@{{additional_cloud_config_mount}}@@g" ${src_file}
  sed -i -e "s@{{additional_cloud_config_volume}}@@g" ${src_file}
  sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" ${src_file}
  sed -i -e "s@{{webhook_authn_config_volume}}@${webhook_authn_config_volume}@g" ${src_file}
  sed -i -e "s@{{webhook_config_mount}}@${webhook_config_mount}@g" ${src_file}
  sed -i -e "s@{{webhook_config_volume}}@${webhook_config_volume}@g" ${src_file}
  sed -i -e "s@{{audit_policy_config_mount}}@${audit_policy_config_mount}@g" ${src_file}
  sed -i -e "s@{{audit_policy_config_volume}}@${audit_policy_config_volume}@g" ${src_file}
  sed -i -e "s@{{audit_webhook_config_mount}}@${audit_webhook_config_mount}@g" ${src_file}
  sed -i -e "s@{{audit_webhook_config_volume}}@${audit_webhook_config_volume}@g" ${src_file}
  sed -i -e "s@{{admission_controller_config_mount}}@${admission_controller_config_mount}@g" ${src_file}
  sed -i -e "s@{{admission_controller_config_volume}}@${admission_controller_config_volume}@g" ${src_file}
  sed -i -e "s@{{image_policy_webhook_config_mount}}@${image_policy_webhook_config_mount}@g" ${src_file}
  sed -i -e "s@{{image_policy_webhook_config_volume}}@${image_policy_webhook_config_volume}@g" ${src_file}
  cp ${src_file} /etc/kubernetes/manifests
}

# Starts kubernetes controller manager.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
#   DOCKER_REGISTRY
proc start-kube-controller-manager {
  echo "Start kubernetes controller-manager"
  create-kubecontrollermanager-kubeconfig
  prepare-log-file /var/log/kube-controller-manager.log
  # Calculate variables and assemble the command line.
  # NOTE(review): the 'setvar params = "" ...""' statements below assign
  # rather than append to params (upstream bash used params+=) — verify
  # this matches the osh->oil translator's intent.
  local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
  setvar params = "" --use-service-account-credentials""
  setvar params = "" --cloud-provider=gce""
  setvar params = "" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig""
  setvar params = "" --root-ca-file=${CA_CERT_BUNDLE_PATH}""
  setvar params = "" --service-account-private-key-file=${SERVICEACCOUNT_KEY_PATH}""
  if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]] {
    setvar params = "" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}""
  }
  if [[ -n "${INSTANCE_PREFIX:-}" ]] {
    setvar params = "" --cluster-name=${INSTANCE_PREFIX}""
  }
  if [[ -n "${CLUSTER_IP_RANGE:-}" ]] {
    setvar params = "" --cluster-cidr=${CLUSTER_IP_RANGE}""
  }
  # Enable the CSR signer only when a cluster CA key is available.
  if [[ -n "${CA_KEY:-}" ]] {
    setvar params = "" --cluster-signing-cert-file=${CA_CERT_PATH}""
    setvar params = "" --cluster-signing-key-file=${CA_KEY_PATH}""
  }
  if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]] {
    setvar params = "" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}""
  }
  if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]] {
    setvar params = "" --concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}""
  }
  # kubenet implies node CIDR allocation; otherwise honor the explicit
  # ALLOCATE_NODE_CIDRS setting when present.
  if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]] {
    setvar params = "" --allocate-node-cidrs=true""
  } elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]] {
    setvar params = "" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}""
  }
  if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]] {
    setvar params = "" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}""
  }
  # IP aliasing moves CIDR allocation to the cloud and disables routes.
  if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]] {
    setvar params = "" --cidr-allocator-type=CloudAllocator""
    setvar params = "" --configure-cloud-routes=false""
  }
  if [[ -n "${FEATURE_GATES:-}" ]] {
    setvar params = "" --feature-gates=${FEATURE_GATES}""
  }
  if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]] {
    setvar params = "" --flex-volume-plugin-dir=${VOLUME_PLUGIN_DIR}""
  }
  if [[ -n "${CLUSTER_SIGNING_DURATION:-}" ]] {
    setvar params = "" --experimental-cluster-signing-duration=$CLUSTER_SIGNING_DURATION""
  }
  # disable using HPA metrics REST clients if metrics-server isn't enabled
  if [[ "${ENABLE_METRICS_SERVER:-}" != "true" ]] {
    setvar params = "" --horizontal-pod-autoscaler-use-rest-clients=false""
  }

  local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
  local container_env=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]] {
    setvar container_env = ""\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],""
  }

  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
  remove-salt-config-comments ${src_file}
  # Evaluate variables.
  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" ${src_file}
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" ${src_file}
  sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" ${src_file}
  sed -i -e "s@{{params}}@${params}@g" ${src_file}
  sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
  sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" ${src_file}
  sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" ${src_file}
  sed -i -e "s@{{additional_cloud_config_mount}}@@g" ${src_file}
  sed -i -e "s@{{additional_cloud_config_volume}}@@g" ${src_file}
  cp ${src_file} /etc/kubernetes/manifests
}

# Starts kubernetes scheduler.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in compute-master-manifest-variables)
#   DOCKER_REGISTRY
proc start-kube-scheduler {
  echo "Start kubernetes scheduler"
  create-kubescheduler-kubeconfig
  prepare-log-file /var/log/kube-scheduler.log

  # Calculate variables and set them in the manifest.
  # NOTE(review): params is assigned via setvar (global, no 'local'), and
  # subsequent 'setvar params = "" ...""' lines replace rather than append
  # (upstream bash used params+=) — verify translator intent.
  setvar params = ""${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}""
  setvar params = "" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig""
  if [[ -n "${FEATURE_GATES:-}" ]] {
    setvar params = "" --feature-gates=${FEATURE_GATES}""
  }
  if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}"  ]] {
    setvar params = "" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}""
  }
  local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")

  # Remove salt comments and replace variables with values.
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
  remove-salt-config-comments ${src_file}

  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" ${src_file}
  sed -i -e "s@{{params}}@${params}@g" ${src_file}
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" ${src_file}
  sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" ${src_file}
  cp ${src_file} /etc/kubernetes/manifests
}

# Starts cluster autoscaler.
# Assumed vars (which are calculated in function compute-master-manifest-variables)
#   CLOUD_CONFIG_OPT
#   CLOUD_CONFIG_VOLUME
#   CLOUD_CONFIG_MOUNT
proc start-cluster-autoscaler {
  # No-op unless cluster autoscaling is explicitly enabled.
  if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]] {
    echo "Start kubernetes cluster autoscaler"
    prepare-log-file /var/log/cluster-autoscaler.log

    # Remove salt comments and replace variables with values
    local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
    remove-salt-config-comments ${src_file}

    # Expander strategy defaults to price-based unless overridden.
    local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
    sed -i -e "s@{{params}}@${params}@g" ${src_file}
    sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" ${src_file}
    sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" ${src_file}
    # Drop any remaining salt {% ... %} directives.
    sed -i -e "s@{%.*%}@@g" ${src_file}

    cp ${src_file} /etc/kubernetes/manifests
  }
}

# A helper function for copying addon manifests and set dir/files
# permissions.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
proc setup-addon-manifests {
  # $1: addon category (subdir of /etc/kubernetes); $2: manifest source dir.
  local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/$2"
  local -r dst_dir="/etc/kubernetes/$1/$2"
  if [[ ! -d "${dst_dir}" ]] {
    mkdir -p ${dst_dir}
  }
  # Copy each supported manifest extension only if at least one file of
  # that type exists (otherwise the glob in cp would fail).
  local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
  if [[ -n "${files}" ]] {
    cp "${src_dir}/"*.yaml ${dst_dir}
  }
  setvar files = $(find "${src_dir}" -maxdepth 1 -name "*.json")
  if [[ -n "${files}" ]] {
    cp "${src_dir}/"*.json ${dst_dir}
  }
  setvar files = $(find "${src_dir}" -maxdepth 1 -name "*.yaml.in")
  if [[ -n "${files}" ]] {
    cp "${src_dir}/"*.yaml.in ${dst_dir}
  }
  # Root-owned, world-readable manifests; traversable directory.
  chown -R root:root ${dst_dir}
  chmod 755 ${dst_dir}
  chmod 644 "${dst_dir}"/*
}

# Fluentd manifest is modified using kubectl, which may not be available at
# this point. Run this as a background process.
proc wait-for-apiserver-and-update-fluentd {
  # Poll until the apiserver responds to kubectl.
  while ! kubectl get nodes
  {
    sleep 10
  }
  # Rewrite the fluentd-gcp daemonset yaml with the configured resource
  # limits/requests, via a temp file to avoid truncating the input.
  # NOTE(review): reads fluentd_gcp_yaml and FLUENTD_GCP_* from the
  # caller's scope — they are not parameters of this proc.
  kubectl set resources --dry-run --local -f ${fluentd_gcp_yaml} \
    --limits=memory=${FLUENTD_GCP_MEMORY_LIMIT} \
    --requests=cpu=${FLUENTD_GCP_CPU_REQUEST},memory=${FLUENTD_GCP_MEMORY_REQUEST} \
    --containers=fluentd-gcp -o yaml > ${fluentd_gcp_yaml}.tmp
  mv ${fluentd_gcp_yaml}.tmp ${fluentd_gcp_yaml}
}

# Trigger background process that will ultimately update fluentd resource
# requirements.
proc start-fluentd-resource-update {
  # Run in the background: kubectl may not be reachable yet (see the
  # polling loop in wait-for-apiserver-and-update-fluentd).
  wait-for-apiserver-and-update-fluentd &
}

# Updates parameters in yaml file for prometheus-to-sd configuration, or
# removes component if it is disabled.
proc update-prometheus-to-sd-parameters {
  # $1: yaml file to edit in place.
  if [[ "${ENABLE_PROMETHEUS_TO_SD:-}" == "true" ]] {
    # Fill in the prometheus-to-sd prefix/endpoint placeholders.
    sed -i -e "s@{{ *prometheus_to_sd_prefix *}}@${PROMETHEUS_TO_SD_PREFIX}@g" $1
    sed -i -e "s@{{ *prometheus_to_sd_endpoint *}}@${PROMETHEUS_TO_SD_ENDPOINT}@g" $1
  } else {
    # Removes all lines between two patterns (throws away prometheus-to-sd)
    sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" $1
   }
}

# Prepares the manifests of k8s addons, and starts the addon manager.
# Vars assumed:
#   CLUSTER_NAME
proc start-kube-addons {
  echo "Prepare kube-addons manifests and start kube addon manager"
  local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
  local -r dst_dir="/etc/kubernetes/addons"

  # prep addition kube-up specific rbac objects
  setup-addon-manifests "addons" "rbac"

  # Set up manifests of other addons.
  if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" ]] {
    prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
    setup-addon-manifests "addons" "kube-proxy"
  }
  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "google" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "standalone" ]] || \
     [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]] {
    local -r file_dir="cluster-monitoring/${ENABLE_CLUSTER_MONITORING}"
    setup-addon-manifests "addons" "cluster-monitoring"
    setup-addon-manifests "addons" ${file_dir}
    # Replace the salt configurations with variable values.
    setvar base_metrics_memory = "${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
    setvar base_eventer_memory = ""190Mi""
    setvar base_metrics_cpu = "${HEAPSTER_GCP_BASE_CPU:-80m}"
    setvar nanny_memory = ""90Mi""
    local -r metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
    local -r metrics_cpu_per_node="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
    local -r eventer_memory_per_node="500"
    local -r nanny_memory_per_node="200"
    # Scale nanny memory with cluster size (per-node increment + 90Mi base).
    if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]] {
      setvar num_kube_nodes = """$((${NUM_NODES}+1))"
      setvar nanny_memory = ""$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki""
    }
    setvar controller_yaml = ""${dst_dir}/${file_dir}""
    if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]] {
      setvar controller_yaml = ""${controller_yaml}/heapster-controller-combined.yaml""
    } else {
      setvar controller_yaml = ""${controller_yaml}/heapster-controller.yaml""
    }
    remove-salt-config-comments ${controller_yaml}
    sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" ${controller_yaml}
    sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" ${controller_yaml}
    sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" ${controller_yaml}
    sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" ${controller_yaml}
    sed -i -e "s@{{ *metrics_memory_per_node *}}@${metrics_memory_per_node}@g" ${controller_yaml}
    sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" ${controller_yaml}
    sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" ${controller_yaml}
    sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" ${controller_yaml}
    update-prometheus-to-sd-parameters ${controller_yaml}
  }
  if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]] {
    setup-addon-manifests "addons" "metrics-server"
  }
  if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]] {
    setup-addon-manifests "addons" "dns"
    local -r kubedns_file="${dst_dir}/dns/kube-dns.yaml"
    mv "${dst_dir}/dns/kube-dns.yaml.in" ${kubedns_file}
    # Replace the salt configurations with variable values.
    sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" ${kubedns_file}
    sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" ${kubedns_file}

    if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]] {
      setup-addon-manifests "addons" "dns-horizontal-autoscaler"
    }
  }
  if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]] {
    setup-addon-manifests "addons" "registry"
    local -r registry_pv_file="${dst_dir}/registry/registry-pv.yaml"
    local -r registry_pvc_file="${dst_dir}/registry/registry-pvc.yaml"
    mv "${dst_dir}/registry/registry-pv.yaml.in" ${registry_pv_file}
    mv "${dst_dir}/registry/registry-pvc.yaml.in" ${registry_pvc_file}
    # Replace the salt configurations with variable values.
    # NOTE(review): this strips comments from ${controller_yaml}, which is
    # only set in the cluster-monitoring branch above — it looks like it
    # should target the registry files instead; confirm against upstream.
    remove-salt-config-comments ${controller_yaml}
    sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" ${registry_pv_file}
    sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" ${registry_pvc_file}
    sed -i -e "s@{{ *pillar\['cluster_registry_disk_name'\] *}}@${CLUSTER_REGISTRY_DISK}@g" ${registry_pvc_file}
  }
  if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
     [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] {
    setup-addon-manifests "addons" "fluentd-elasticsearch"
  }
  if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION:-}" == "gcp" ]] {
    setup-addon-manifests "addons" "fluentd-gcp"
    local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
    local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
    update-prometheus-to-sd-parameters ${event_exporter_yaml}
    update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
    start-fluentd-resource-update
  }
  if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]] {
    setup-addon-manifests "addons" "dashboard"
  }
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "daemonset" ]] {
    setup-addon-manifests "addons" "node-problem-detector"
  }
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]] {
    # Setup role binding for standalone node problem detector.
    setup-addon-manifests "addons" "node-problem-detector/standalone"
  }
  # ADMISSION_CONTROL is a comma-separated list; substring match is enough.
  if echo ${ADMISSION_CONTROL:-} | grep -q "LimitRanger" {
    setup-addon-manifests "admission-controls" "limit-range"
  }
  if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]] {
    setup-addon-manifests "addons" "calico-policy-controller"

    # Configure Calico CNI directory.
    local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
    sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" ${ds_file}
  }
  if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]] {
    setup-addon-manifests "addons" "storage-class/gce"
  }
  if [[ "${ENABLE_IP_MASQ_AGENT:-}" == "true" ]] {
    setup-addon-manifests "addons" "ip-masq-agent"
  }
  if [[ "${ENABLE_METADATA_PROXY:-}" == "simple" ]] {
    setup-addon-manifests "addons" "metadata-proxy/gce"
  }

  # Place addon manager pod manifest.
  cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests
}

# Starts an image-puller - used in test clusters.
proc start-image-puller {
  echo "Start image-puller"
  # Static pod: kubelet picks it up from the manifests directory.
  cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest" \
    /etc/kubernetes/manifests/
}

# Starts kube-registry proxy
proc start-kube-registry-proxy {
  echo "Start kube-registry-proxy"
  # Static pod: kubelet picks it up from the manifests directory.
  cp "${KUBE_HOME}/kube-manifests/kubernetes/kube-registry-proxy.yaml" /etc/kubernetes/manifests
}

# Starts a l7 loadbalancing controller for ingress.
proc start-lb-controller {
  # Only deployed when GLBC is the configured L7 provider.
  if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]] {
    echo "Start GCE L7 pod"
    prepare-log-file /var/log/glbc.log
    setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
    cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest" \
       /etc/kubernetes/manifests/
  }
}

# Starts rescheduler.
proc start-rescheduler {
  if [[ "${ENABLE_RESCHEDULER:-}" == "true" ]] {
    echo "Start Rescheduler"
    prepare-log-file /var/log/rescheduler.log
    # Static pod: kubelet picks it up from the manifests directory.
    cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/rescheduler.manifest" \
       /etc/kubernetes/manifests/
  }
}

# Setup working directory for kubelet.
proc setup-kubelet-dir {
    echo "Making /var/lib/kubelet executable for kubelet"
    # Bind-mount onto itself, then remount with exec/suid/dev enabled.
    mount -B /var/lib/kubelet /var/lib/kubelet/
    mount -B -o remount,exec,suid,dev /var/lib/kubelet
}

proc reset-motd {
  # Writes /etc/motd with the running Kubernetes version and source links.
  # kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
  local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"
  # This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
  # or the git hash that's in the build info.
  local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
  local devel=""
  if [[ "${gitref}" != "${version}" ]] {
    setvar devel = ""
Note: This looks like a development version, which might not be present on GitHub.
If it isn't, the closest tag is at:
  https://github.com/kubernetes/kubernetes/tree/${gitref}
""
    # Strip everything up to and including '+' to get the bare git ref.
    setvar gitref = "${version//*+/}"
  }
  cat > /etc/motd <<< """

Welcome to Kubernetes ${version}!

You can find documentation for Kubernetes at:
  http://docs.kubernetes.io/

The source for this release can be found at:
  /home/kubernetes/kubernetes-src.tar.gz
Or you can download it at:
  https://storage.googleapis.com/kubernetes-release/release/${version}/kubernetes-src.tar.gz

It is based on the Kubernetes source at:
  https://github.com/kubernetes/kubernetes/tree/${gitref}
${devel}
For Kubernetes copyright and licensing information, see:
  /home/kubernetes/LICENSES

"""
}

proc override-kubectl {
    echo "overriding kubectl"
    # Prepend ${KUBE_HOME}/bin so the bundled kubectl shadows any system one.
    echo "export PATH=${KUBE_HOME}/bin:\$PATH" > /etc/profile.d/kube_env.sh
}

########### Main Function ###########
echo "Start to configure instance for kubernetes"

setvar KUBE_HOME = ""/home/kubernetes""
setvar CONTAINERIZED_MOUNTER_HOME = ""${KUBE_HOME}/containerized_mounter""
# kube-env is required; it supplies all the KUBE_*/ENABLE_* settings below.
if [[ ! -e "${KUBE_HOME}/kube-env" ]] {
  echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
  exit 1
}

source "${KUBE_HOME}/kube-env"

if [[ -e "${KUBE_HOME}/kube-master-certs" ]] {
  source "${KUBE_HOME}/kube-master-certs"
}

# Validate KUBE_USER early: it ends up in auth files.
if [[ -n "${KUBE_USER:-}" ]] {
  if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]] {
    echo "Bad KUBE_USER format."
    exit 1
  }
}

# generate the controller manager and scheduler tokens here since they are only used on the master.
setvar KUBE_CONTROLLER_MANAGER_TOKEN = $(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
setvar KUBE_SCHEDULER_TOKEN = $(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)

# Common host setup for both master and nodes.
setup-os-params
config-ip-firewall
create-dirs
setup-kubelet-dir
ensure-local-ssds
setup-logrotate
if [[ "${KUBERNETES_MASTER:-}" == "true" ]] {
  mount-master-pd
  create-node-pki
  create-master-pki
  create-master-auth
  create-master-kubelet-auth
  create-master-etcd-auth
} else {
  create-node-pki
  create-kubelet-kubeconfig ${KUBERNETES_MASTER_NAME}
  if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]] {
    create-kubeproxy-user-kubeconfig
  }
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]] {
    create-node-problem-detector-kubeconfig
  }
}

override-kubectl
# Run the containerized mounter once to pre-cache the container image.
assemble-docker-flags
start-kubelet

# Master runs the control plane; nodes run per-node daemons.
if [[ "${KUBERNETES_MASTER:-}" == "true" ]] {
  compute-master-manifest-variables
  start-etcd-servers
  start-etcd-empty-dir-cleanup-pod
  start-kube-apiserver
  start-kube-controller-manager
  start-kube-scheduler
  start-kube-addons
  start-cluster-autoscaler
  start-lb-controller
  start-rescheduler
} else {
  if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]] {
    start-kube-proxy
  }
  # Kube-registry-proxy.
  if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]] {
    start-kube-registry-proxy
  }
  if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]] {
    start-image-puller
  }
  if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]] {
    start-node-problem-detector
  }
}
reset-motd
prepare-mounter-rootfs
modprobe configs
echo "Done for the configuration for kubernetes"
    (DONE benchmarks/testdata/configure-helper.sh)
#!/bin/sh
setvar srcversion = ''$MirOS: src/bin/mksh/Build.sh,v 1.697 2016/03/04 18:28:39 tg Exp $''
#-
# Copyright (c) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
#		2011, 2012, 2013, 2014, 2015, 2016
#	mirabilos <m@mirbsd.org>
#
# Provided that these terms and disclaimer and all copyright notices
# are retained or reproduced in an accompanying document, permission
# is granted to deal in this work without restriction, including un-
# limited rights to use, publicly perform, distribute, sell, modify,
# merge, give away, or sublicence.
#
# This work is provided "AS IS" and WITHOUT WARRANTY of any kind, to
# the utmost extent permitted by applicable law, neither express nor
# implied; without malicious intent or gross negligence. In no event
# may a licensor, author or contributor be held liable for indirect,
# direct, other damage, loss, or other issues arising in any way out
# of dealing in the work, even if advised of the possibility of such
# damage or existence of a defect, except proven that it results out
# of said person's immediate fault when using the work as intended.
#-
# People analysing the output must whitelist conftest.c for any kind
# of compiler warning checks (mirtoconf is by design not quiet).
#
# Used environment documentation is at the end of this file.

setvar LC_ALL = 'C'
export LC_ALL

case (ZSH_VERSION) {
:zsh* { setvar ZSH_VERSION = '2' }
}

if test -n ${ZSH_VERSION+x} && shell {emulate sh} >/dev/null 2>&1 {
	emulate sh
	setvar NULLCMD = ':'
}

if test -d /usr/xpg4/bin/. >/dev/null 2>&1 {
	# Solaris: some of the tools have weird behaviour, use portable ones
	setvar PATH = "/usr/xpg4/bin:$PATH"
	export PATH
}

setvar nl = ''
''
setvar safeIFS = ''	''
setvar safeIFS = "" $safeIFS$nl""
setvar IFS = "$safeIFS"
setvar allu = 'QWERTYUIOPASDFGHJKLZXCVBNM'
setvar alll = 'qwertyuiopasdfghjklzxcvbnm'
setvar alln = '0123456789'
setvar alls = '______________________________________________________________'

proc genopt_die {
	# Print an error (the given message, or a generic one) plus the
	# offending source line, remove the partial output, and abort.
	# The message text had been duplicated after each >&2 redirect.
	if test -n $1 {
		echo >&2 "E: $[join(ARGV)]"
		echo >&2 "E: in '$srcfile': '$line'"
	} else {
		echo >&2 "E: invalid input in '$srcfile': '$line'"
	}
	rm -f "$bn.gen"
	exit 1
}

proc genopt_soptc {
	# Extract the option character from $line; '|' means "no char".
	# Both sed scripts had lost their closing single quote.
	setvar optc = $(echo $line | sed 's/^[<>]\(.\).*$/\1/')
	test x"$optc" = x'|' && return
	setvar optclo = $(echo $optc | tr $allu $alll)
	# islo: 1 when the option char is already lower-case.
	if test x"$optc" = x"$optclo" {
		setvar islo = '1'
	} else {
		setvar islo = '0'
	}
	setvar sym = $(echo $line | sed 's/^[<>]/|/')
	setvar o_str = "$o_str$nl"<$optclo$islo$sym""
}

proc genopt_scond {
	case{
	x {
		setvar cond = ''
		}
	x*' '* {
		setvar cond = $(echo $cond | sed 's/^ //)
		setvar cond = ""#if $cond""
		}
	x'!'* {
		setvar cond = $(echo $cond | sed 's/^!//)
		setvar cond = ""#ifndef $cond""
		}
	x* {
		setvar cond = ""#ifdef $cond""
		}
	}
}

proc do_genopt {
	setvar srcfile = "$1"
	test -f $srcfile || genopt_die Source file '$'srcfile not set.
	setvar bn = $(basename $srcfile | sed 's/.opt$//)
	setvar o_hdr = ''/* +++ GENERATED FILE +++ DO NOT EDIT +++ */''
	setvar o_gen = ''
	setvar o_str = ''
	setvar o_sym = ''
	setvar ddefs = ''
	setvar state = '0'
	exec <"$srcfile"
	setvar IFS = ''''
	while IFS= read line {
		setvar IFS = "$safeIFS"
		case (state) {
		2:'|'* {
			# end of input
			setvar o_sym = $(echo $line | sed 's/^.//)
			setvar o_gen = "$o_gen$nl"#undef F0""
			setvar o_gen = "$o_gen$nl"#undef FN""
			setvar o_gen = "$o_gen$ddefs"
			setvar state = '3'
			}
		1:@@ {
			# begin of data block
			setvar o_gen = "$o_gen$nl"#endif""
			setvar o_gen = "$o_gen$nl"#ifndef F0""
			setvar o_gen = "$o_gen$nl"#define F0 FN""
			setvar o_gen = "$o_gen$nl"#endif""
			setvar state = '2'
			}
		*:@@* {
			genopt_die }
		0:/\*-|0:\ \**|0: {
			setvar o_hdr = "$o_hdr$nl$line"
			}
		0:@*|1:@* {
			# begin of a definition block
			setvar sym = $(echo $line | sed 's/^@//)
			if test $state = 0 {
				setvar o_gen = "$o_gen$nl"#if defined($sym)""
			} else {
				setvar o_gen = "$o_gen$nl"#elif defined($sym)""
			}
			setvar ddefs = ""$ddefs$nl#undef $sym""
			setvar state = '1'
			}
		0:*|3:* {
			genopt_die }
		1:* {
			# definition line
			setvar o_gen = "$o_gen$nl$line"
			}
		2:'<'*'|'* {
			genopt_soptc
			}
		2:'>'*'|'* {
			genopt_soptc
			setvar cond = $(echo $line | sed 's/^[^|]*|//)
			genopt_scond
			case (optc) {
			'|' { setvar optc = '0' }
			* { setvar optc = "'''$optc'''" }
			}''
			IFS= read line || genopt_die Unexpected EOF
			setvar IFS = "$safeIFS"
			test -n $cond && setvar o_gen = "$o_gen$nl"$cond""
			setvar o_gen = "$o_gen$nl"$line, $optc)""
			test -n $cond && setvar o_gen = "$o_gen$nl"#endif""
			}
		}
	}
	case (state) {
	3: { genopt_die Expected optc sym at EOF }
	3:* { }
	* { genopt_die Missing EOF marker }
	}
	echo $o_str | sort' | while IFS='|'' read x opts cond {
		setvar IFS = "$safeIFS"
		test -n $x || continue
		genopt_scond
		test -n $cond && echo $cond
		echo "\"$opts\""
		test -n $cond && echo "#endif"
	} | do {
		echo $o_hdr
		echo "#ifndef $o_sym$o_gen"
		echo "#else"
		cat
		echo "#undef $o_sym"
		echo "#endif"
	} >"$bn.gen"
	setvar IFS = "$safeIFS"
	return 0
}

# Re-exec entry point: with BUILDSH_RUN_GENOPT=1 or -G <file.opt>, run
# only the options generator and exit.
if test x"$BUILDSH_RUN_GENOPT" = x"1" {
	set x -G $srcfile
	shift
}
if test x"$1" = x"-G" {
	do_genopt $2
	exit $?
}

echo "For the build logs, demonstrate that /dev/null and /dev/tty exist:"
ls -l /dev/null /dev/tty

proc v {
	# Echo the command (via $e), then run it.
	$e "$[join(ARGV)]"
	eval @ARGV
}

proc vv {
	# Like v, but captures output in vv.out and prints it prefixed with
	# "$1 " (the first argument is the log-prefix character).
	setvar _c = "$1"
	shift
	$e "\$ $[join(ARGV)]" 2>&1
	eval @ARGV >vv.out 2>&1
	sed "s^${_c} " <vv.out
}

proc vq {
	# Quiet variant of v: run the command without echoing it.
	eval @ARGV
}

proc rmf {
	# rm -f each argument, but never delete tracked source files.
	for _f in "$@" {
		case (_f) {
		Build.sh|check.pl|check.t|dot.mkshrc|*.1|*.c|*.h|*.ico|*.opt { }
		* { rm -f $_f }
		}
	}
}

# Global state: tcfn = compiled test binary name ('no' until found),
# bi/ui/ao = terminal attributes, fx = cached/implied marker.
setvar tcfn = 'no'
setvar bi = ''
setvar ui = ''
setvar ao = ''
setvar fx = ''
setvar me = $(basename $0)
setvar orig_CFLAGS = "$CFLAGS"
setvar phase = 'x'
setvar oldish_ed = "stdout-ed,no-stderr-ed"

if test -t 1 {
	# NOTE(review): upstream assigns ANSI bold/underline/reset escapes
	# here when stdout is a tty; the empty strings suggest the escape
	# characters were stripped in translation — confirm against upstream.
	setvar bi = ''''
	setvar ui = ''''
	setvar ao = ''''
}

proc upper {
	# Upper-case all arguments; the leading ':' guards against echo
	# interpreting an argument as an option, sed strips it again.
	echo :"$[join(ARGV)]" | sed 's/^://' | tr $alll $allu
}

# clean up after ac_testrun()
# clean up after ac_testrun()
proc ac_testdone {
	# Record HAVE_$fu=$fv and print the yes/no result line.
	eval HAVE_$fu=$fv
	setvar fr = 'no'
	test 0 = $fv || setvar fr = 'yes'
	$e "$bi==> $fd...$ao $ui$fr$ao$fx"
	setvar fx = ''
}

# ac_cache label: sets f, fu, fv?=0
proc ac_cache {
	# ac_cache label: sets f, fu, fv?=0.
	# Returns 0 (and marks "(cached)") when HAVE_$fu is already 0 or 1.
	setvar f = "$1"
	setvar fu = $(upper $f)
	eval fv='$'HAVE_$fu
	case (fv) {
	0|1 {
		setvar fx = '' (cached)''
		return 0
		}
	}
	setvar fv = '0'
	return 1
}

# ac_testinit label [!] checkif[!]0 [setlabelifcheckis[!]0] useroutput
# returns 1 if value was cached/implied, 0 otherwise: call ac_testdone
proc ac_testinit {
	# ac_testinit label [!] checkif[!]0 [setlabelifcheckis[!]0] useroutput
	# Returns 1 if the value was cached or implied by a prerequisite,
	# 0 otherwise (caller must then run the test and call ac_testdone).
	if ac_cache $1 {
		test x"$2" = x"!" && shift
		test x"$2" = x"" || shift
		setvar fd = ${3-$f}
		ac_testdone
		return 1
	}
	setvar fc = '0'
	if test x"$2" = x"" {
		setvar ft = '1'
	} else {
		# Optional '!' inverts the prerequisite check.
		if test x"$2" = x"!" {
			setvar fc = '1'
			shift
		}
		eval ft='$'HAVE_$(upper $2)
		shift
	}
	setvar fd = ${3-$f}
	# Prerequisite not satisfied: result is implied, skip the test.
	if test $fc = $ft {
		setvar fv = "$2"
		setvar fx = '' (implied)''
		ac_testdone
		return 1
	}
	$e ... $fd
	return 0
}

# pipe .c | ac_test[n] [!] label [!] checkif[!]0 [setlabelifcheckis[!]0] useroutput
proc ac_testnnd {
	# Compile conftest.c (read from stdin) and set fv from whether a
	# binary was produced; a leading '!' inverts the sense.
	if test x"$1" = x"!" {
		setvar fr = '1'
		shift
	} else {
		setvar fr = '0'
	}
	ac_testinit @ARGV || return 1
	cat >conftest.c
	vv ']' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN conftest.c $LIBS $ccpr"
	# Probe the possible output names the toolchain may have produced.
	test $tcfn = no && test -f a.out && setvar tcfn = 'a.out'
	test $tcfn = no && test -f a.exe && setvar tcfn = 'a.exe'
	test $tcfn = no && test -f conftest.exe && setvar tcfn = 'conftest.exe'
	test $tcfn = no && test -f conftest && setvar tcfn = 'conftest'
	if test -f $tcfn {
		test 1 = $fr || setvar fv = '1'
	} else {
		test 0 = $fr || setvar fv = '1'
	}
	# During the flag-probing phase, treat "unrecognised option" style
	# compiler warnings as failure.
	setvar vscan = ''
	if test $phase = u {
		test $ct = gcc && setvar vscan = ''unrecogni[sz]ed''
		test $ct = hpcc && setvar vscan = ''unsupported''
		test $ct = pcc && setvar vscan = ''unsupported''
		test $ct = sunpro && setvar vscan = ''-e ignored -e turned.off''
	}
	test -n $vscan && grep $vscan vv.out >/dev/null 2>&1 && setvar fv = "$fr"
	return 0
}
proc ac_testn {
	# Run a compile test, clean up the artefacts, record the result.
	ac_testnnd @ARGV || return
	rmf conftest.c conftest.o ${tcfn}* vv.out
	ac_testdone
}

# ac_ifcpp cppexpr [!] label [!] checkif[!]0 [setlabelifcheckis[!]0] useroutput
proc ac_ifcpp {
	# ac_ifcpp cppexpr [!] label ...: evaluate a preprocessor expression
	# by compiling a program that fails to link when the expr is false.
	setvar expr = "$1"; shift
	ac_testn @ARGV <<< """
#include <unistd.h>
extern int thiswillneverbedefinedIhope(void);
int main(void) { return (isatty(0) +
#$expr
    0
#else
/* force a failure: expr is false */
    thiswillneverbedefinedIhope()
#endif
    ); }
"""
	# Return truth of the recorded HAVE_ value for the label.
	test x"$1" = x"!" && shift
	setvar f = "$1"
	setvar fu = $(upper $f)
	eval fv='$'HAVE_$fu
	test x"$fv" = x"1"
}

proc add_cppflags {
	# Append all arguments to CPPFLAGS.
	setvar CPPFLAGS = ""$CPPFLAGS $[join(ARGV)]""
}

proc ac_cppflags {
	# Add -DHAVE_<label>=<value>; defaults to the last test's fu/fv.
	test x"$1" = x"" || setvar fu = "$1"
	setvar fv = "$2"
	test x"$2" = x"" && eval fv='$'HAVE_$fu
	add_cppflags -DHAVE_$fu=$fv
}

proc ac_test {
	# Run a compile test and record its result as a -DHAVE_ cppflag.
	ac_testn @ARGV
	ac_cppflags
}

# ac_flags [-] add varname cflags [text] [ldflags]
proc ac_flags {
	# ac_flags [-] add varname cflags [text] [ldflags]
	# Probe whether the compiler accepts $cflags (and optionally
	# $ldflags); keep them only when add=1 and the probe succeeded.
	if test x"$1" = x"-" {
		shift
		setvar hf = '1'
	} else {
		setvar hf = '0'
	}
	setvar fa = "$1"
	setvar vn = "$2"
	setvar f = "$3"
	setvar ft = "$4"
	setvar fl = "$5"
	test x"$ft" = x"" && setvar ft = ""if $f can be used""
	setvar save_CFLAGS = "$CFLAGS"
	setvar CFLAGS = ""$CFLAGS $f""
	if test -n $fl {
		setvar save_LDFLAGS = "$LDFLAGS"
		setvar LDFLAGS = ""$LDFLAGS $fl""
	}
	# With '-', the caller supplies the conftest source on stdin.
	if test 1 = $hf {
		ac_testn can_$vn '' $ft
	} else {
		ac_testn can_$vn '' $ft <<< '''
/* evil apo'stroph in comment test */
#include <unistd.h>
int main(void) { return (isatty(0)); }
'''
	}
	# Restore the saved flags unless (add=1 AND probe succeeded).
	eval fv='$'HAVE_CAN_$(upper $vn)
	if test -n $fl {
		test 11 = $fa$fv || setvar LDFLAGS = "$save_LDFLAGS"
	}
	test 11 = $fa$fv || setvar CFLAGS = "$save_CFLAGS"
}

# ac_header [!] header [prereq ...]
proc ac_header {
	# ac_header [!] header [prereq ...]: test whether <header> compiles
	# after the given prerequisite includes; '!' suppresses the cppflag.
	if test x"$1" = x"!" {
		setvar na = '1'
		shift
	} else {
		setvar na = '0'
	}
	setvar hf = "$1"; shift
	setvar hv = $(echo $hf | tr -d '\012\015' | tr -c $alll$allu$alln $alls)
	# Start the conftest source in x; this echo and the for loop had
	# been fused onto one line, losing the '>x' redirect.
	echo "/* NeXTstep bug workaround */" >x
	for i in @ARGV {
		case (i) {
		_time {
			echo '#if HAVE_BOTH_TIME_H' >>x
			echo '#include <sys/time.h>' >>x
			echo '#include <time.h>' >>x
			echo '#elif HAVE_SYS_TIME_H' >>x
			echo '#include <sys/time.h>' >>x
			echo '#elif HAVE_TIME_H' >>x
			echo '#include <time.h>' >>x
			echo '#endif' >>x
			}
		* {
			echo "#include <$i>" >>x
			}
		}
	}
	echo "#include <$hf>" >>x
	echo '#include <unistd.h>' >>x
	echo 'int main(void) { return (isatty(0)); }' >>x
	ac_testn $hv "" "<$hf>" <x
	rmf x
	test 1 = $na || ac_cppflags
}

proc addsrcs {
	# addsrcs [!] var src: append $2 to SRCS when $var equals 1 (0 with
	# '!') and it is not already present (word-boundary match).
	if test x"$1" = x"!" {
		setvar fr = '0'
		shift
	} else {
		setvar fr = '1'
	}
	eval i='$'$1
	# The case scrutinee " $SRCS " had been lost in translation.
	test $fr = $i && case " $SRCS " {
	*\ $2\ * {	}
	* {		setvar SRCS = ""$SRCS $2"" }
	}
}


setvar curdir = $(pwd), srcdir = $(dirname $0 )
case{
x {
	setvar srcdir = '.'
	}
*\ *|*"	"*|*"$nl"* {
	echo >&2 Source directory should not contain space or tab or newline.>&2 Source directory should not contain space or tab or newline.
	echo >&2 Errors may occur.>&2 Errors may occur.
	}
*"'"* {
	echo Source directory must not contain single quotes.
	exit 1
	}
}
# Extract MKSH_VERSION from sh.h; the closing double quote of the sh.h
# path argument had been lost.
setvar dstversion = $(sed -n '/define MKSH_VERSION/s/^.*"\([^"]*\)".*$/\1/p' "$srcdir/sh.h")
add_cppflags -DMKSH_BUILDSH

# Option-parsing defaults.
setvar e = 'echo'
setvar r = '0'
setvar eq = '0'
setvar pm = '0'
setvar cm = 'normal'
setvar optflags = '-std-compile-opts'
setvar check_categories = ''
setvar last = ''
setvar tfn = ''
setvar legacy = '0'for i in @ARGV {
	case (last) {
	c:combine|c:dragonegg|c:llvm|c:lto {
		setvar cm = "$i"
		setvar last = ''
		}
	c:* {
		echo "$me: Unknown option -c '$i'!" >&2
		exit 1
		}
	o:* {
		setvar optflags = "$i"
		setvar last = ''
		}
	t:* {
		setvar tfn = "$i"
		setvar last = ''
		}
	:-c {
		setvar last = 'c'
		}
	:-G {
		echo "$me: Do not call me with '-G'!" >&2
		exit 1
		}
	:-g {
		# checker, debug, valgrind build
		add_cppflags -DDEBUG
		setvar CFLAGS = ""$CFLAGS -g3 -fno-builtin""
		}
	:-j {
		setvar pm = '1'
		}
	:-L {
		setvar legacy = '1'
		}
	:+L {
		setvar legacy = '0'
		}
	:-M {
		setvar cm = 'makefile'
		}
	:-O {
		setvar optflags = '-std-compile-opts'
		}
	:-o {
		setvar last = 'o'
		}
	:-Q {
		setvar eq = '1'
		}
	:-r {
		setvar r = '1'
		}
	:-t {
		setvar last = 't'
		}
	:-v {
		echo "Build.sh $srcversion"
		echo "for mksh $dstversion"
		exit 0
		}
	:* {
		echo "$me: Unknown option '$i'!" >&2
		exit 1
		}
	* {
		echo "$me: Unknown option -'$last' '$i'!" >&2
		exit 1
		}
	}
}
# A pending option letter at end of argv means a missing argument.
if test -n $last {
	echo "$me: Option -'$last' not followed by argument!" >&2
	exit 1
}

# Default target name: mksh (modern) or lksh (legacy mode).
test -z $tfn && if test $legacy = 0 {
	setvar tfn = 'mksh'
} else {
	setvar tfn = 'lksh'
}
if test -d $tfn || test -d $tfn.exe {
	echo "$me: Error: ./$tfn is a directory!" >&2
	exit 1
}
# Clean all build artefacts from previous runs.
rmf a.exe* a.out* conftest.c conftest.exe* *core core.* ${tfn}* *.bc *.dbg \
    *.ll *.o *.gen Rebuild.sh lft no signames.inc test.sh x vv.out

setvar SRCS = ""lalloc.c eval.c exec.c expr.c funcs.c histrap.c jobs.c""
setvar SRCS = ""$SRCS lex.c main.c misc.c shf.c syn.c tree.c var.c""

# Legacy mode drops the line editor and changes the test categories.
if test $legacy = 0 {
	setvar SRCS = ""$SRCS edit.c""
	setvar check_categories = ""$check_categories shell:legacy-no int:32""
} else {
	setvar check_categories = ""$check_categories shell:legacy-yes""
	add_cppflags -DMKSH_LEGACY_MODE
	setvar HAVE_PERSISTENT_HISTORY = '0'
	setvar HAVE_ISSET_MKSH_CONSERVATIVE_FDS = '1'	# from sh.h
}

if test x"$srcdir" = x"." {
	setvar CPPFLAGS = ""-I. $CPPFLAGS""
} else {
	setvar CPPFLAGS = ""-I. -I'$srcdir' $CPPFLAGS""
}
test -n $LDSTATIC && if test -n $LDFLAGS {
	setvar LDFLAGS = ""$LDFLAGS $LDSTATIC""
} else {
	setvar LDFLAGS = "$LDSTATIC"
}

# Determine the target OS from uname unless TARGET_OS is preset.
if test -z $TARGET_OS {
	setvar x = $(uname -s 2>/dev/null || uname)
	test x"$x" = x"$(uname -n )" || setvar TARGET_OS = "$x"
}
if test -z $TARGET_OS {
	echo "$me: Set TARGET_OS, your uname is broken!" >&2
	exit 1
}
# ccpr: post-compile cleanup appended to each compiler invocation.
setvar oswarn = ''
setvar ccpc = "-Wc,"
setvar ccpl = "-Wl,"
setvar tsts = ''
setvar ccpr = ''|| for _f in ${tcfn}*; do case $_f in Build.sh|check.pl|check.t|dot.mkshrc|*.1|*.c|*.h|*.ico|*.opt) ;; *) rm -f "$_f" ;; esac; done''

# Evil hack
if test x"$TARGET_OS" = x"Android" {
	setvar check_categories = ""$check_categories android""
	setvar TARGET_OS = 'Linux'
}

# Evil OS
if test x"$TARGET_OS" = x"Minix" {
	# Each of the three warnings below is a single multi-line string;
	# the text had been duplicated after the >&2 redirects.
	echo >&2 "
WARNING: additional checks before running Build.sh required!
You can avoid these by calling Build.sh correctly, see below.
"
	cat >conftest.c <<< '''
#include <sys/types.h>
const char *
#ifdef _NETBSD_SOURCE
ct="Ninix3"
#else
ct="Minix3"
#endif
;
'''
	setvar ct = 'unknown'
	vv ']' "${CC-cc} -E $CFLAGS $CPPFLAGS $NOWARN conftest.c | grep ct= | tr -d \\\\015 >x"
	sed 's/^/[ /' x
	eval $(cat x)
	rmf x vv.out
	case (ct) {
	Minix3|Ninix3 {
		echo >&2 "
Warning: you set TARGET_OS to $TARGET_OS but that is ambiguous.
Please set it to either Minix3 or Ninix3, whereas the latter is
all versions of Minix with even partial NetBSD(R) userland. The
value determined from your compiler for the current compilation
(which may be wrong) is: $ct
"
		setvar TARGET_OS = "$ct"
		}
	* {
		echo >&2 "
Warning: you set TARGET_OS to $TARGET_OS but that is ambiguous.
Please set it to either Minix3 or Ninix3, whereas the latter is
all versions of Minix with even partial NetBSD(R) userland. The
proper value couldn't be determined, continue at your own risk.
"
		}
	}
}

# Configuration depending on OS revision, on OSes that need them
case (TARGET_OS) {
NEXTSTEP {
	# Parse the revision out of hostinfo output; the sed script's
	# closing single quote had been lost.
	test x"$TARGET_OSREV" = x"" && setvar TARGET_OSREV = $(hostinfo 2>&1 | \
	    grep 'NeXT Mach [0-9][0-9.]*:' | \
	    sed 's/^.*NeXT Mach \([0-9][0-9.]*\):.*$/\1/')
	}
QNX|SCO_SV {
	test x"$TARGET_OSREV" = x"" && setvar TARGET_OSREV = $(uname -r)
	}
}

# Configuration depending on OS name
case (TARGET_OS) {
386BSD {
	: ${HAVE_CAN_OTWO=0}
	add_cppflags -DMKSH_NO_SIGSETJMP
	add_cppflags -DMKSH_TYPEDEF_SIG_ATOMIC_T=int
	add_cppflags -DMKSH_CONSERVATIVE_FDS
	}
AIX {
	add_cppflags -D_ALL_SOURCE
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
BeOS {
	case (KSH_VERSION) {
	*MIRBSD\ KSH* {
		setvar oswarn = ""; it has minor issues""
		}
	* {
		setvar oswarn = ""; you must recompile mksh with""
		setvar oswarn = ""$oswarn${nl}itself in a second stage""
		}
	}
	# BeOS has no real tty either
	add_cppflags -DMKSH_UNEMPLOYED
	add_cppflags -DMKSH_DISABLE_TTY_WARNING
	# BeOS doesn't have different UIDs and GIDs
	add_cppflags -DMKSH__NO_SETEUGID
	}
BSD/OS {
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
Coherent {
	setvar oswarn = ""; it has major issues""
	add_cppflags -DMKSH__NO_SYMLINK
	setvar check_categories = ""$check_categories nosymlink""
	add_cppflags -DMKSH__NO_SETEUGID
	add_cppflags -DMKSH_CONSERVATIVE_FDS
	add_cppflags -DMKSH_DISABLE_TTY_WARNING
	}
CYGWIN* {
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
Darwin {
	add_cppflags -D_DARWIN_C_SOURCE
	}
DragonFly {
	}
FreeBSD {
	}
FreeMiNT {
	setvar oswarn = ""; it has minor issues""
	add_cppflags -D_GNU_SOURCE
	add_cppflags -DMKSH_CONSERVATIVE_FDS
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
GNU {
	case (CC) {
	*tendracc* { }
	* { add_cppflags -D_GNU_SOURCE }
	}
	add_cppflags -DSETUID_CAN_FAIL_WITH_EAGAIN
	# define MKSH__NO_PATH_MAX to use Hurd-only functions
	add_cppflags -DMKSH__NO_PATH_MAX
	}
GNU/kFreeBSD {
	case (CC) {
	*tendracc* { }
	* { add_cppflags -D_GNU_SOURCE }
	}
	add_cppflags -DSETUID_CAN_FAIL_WITH_EAGAIN
	}
Haiku {
	add_cppflags -DMKSH_ASSUME_UTF8; setvar HAVE_ISSET_MKSH_ASSUME_UTF8 = '1'
	}
HP-UX {
	}
Interix {
	setvar ccpc = ''-X ''
	setvar ccpl = ''-Y ''
	add_cppflags -D_ALL_SOURCE
	: ${LIBS=-lcrypt}
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
IRIX* {
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
Linux {
	case (CC) {
	*tendracc* { }
	* { add_cppflags -D_GNU_SOURCE }
	}
	add_cppflags -DSETUID_CAN_FAIL_WITH_EAGAIN
	: ${HAVE_REVOKE=0}
	}
LynxOS {
	setvar oswarn = ""; it has minor issues""
	}
MidnightBSD {
	}
Minix-vmd {
	add_cppflags -DMKSH__NO_SETEUGID
	add_cppflags -DMKSH_UNEMPLOYED
	add_cppflags -DMKSH_CONSERVATIVE_FDS
	add_cppflags -D_MINIX_SOURCE
	setvar oldish_ed = 'no-stderr-ed'		# no /bin/ed, maybe see below
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
Minix3 {
	add_cppflags -DMKSH_UNEMPLOYED
	add_cppflags -DMKSH_CONSERVATIVE_FDS
	add_cppflags -DMKSH_NO_LIMITS
	add_cppflags -D_POSIX_SOURCE -D_POSIX_1_SOURCE=2 -D_MINIX
	setvar oldish_ed = 'no-stderr-ed'		# /usr/bin/ed(!) is broken
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
MirBSD {
	}
MSYS_* {
	add_cppflags -DMKSH_ASSUME_UTF8=0; setvar HAVE_ISSET_MKSH_ASSUME_UTF8 = '1'
	# almost same as CYGWIN* (from RT|Chatzilla)
	: ${HAVE_SETLOCALE_CTYPE=0}
	# broken on this OE (from ir0nh34d)
	: ${HAVE_STDINT_H=0}
	}
NetBSD {
	}
NEXTSTEP {
	add_cppflags -D_NEXT_SOURCE
	add_cppflags -D_POSIX_SOURCE
	: ${AWK=gawk}
	: ${CC=cc -posix}
	add_cppflags -DMKSH_NO_SIGSETJMP
	# NeXTstep cannot get a controlling tty
	add_cppflags -DMKSH_UNEMPLOYED
	case (TARGET_OSREV) {
	4.2* {
		# OpenStep 4.2 is broken by default
		setvar oswarn = ""; it needs libposix.a""
		}
	}
	add_cppflags -DMKSH_CONSERVATIVE_FDS
	}
Ninix3 {
	# similar to Minix3
	add_cppflags -DMKSH_UNEMPLOYED
	add_cppflags -DMKSH_CONSERVATIVE_FDS
	add_cppflags -DMKSH_NO_LIMITS
	# but no idea what else could be needed
	setvar oswarn = ""; it has unknown issues""
	}
OpenBSD {
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
OS/2 {
	setvar HAVE_TERMIOS_H = '0'
	setvar HAVE_MKNOD = '0'	# setmode() incompatible
	setvar oswarn = ""; it is currently being ported""
	setvar check_categories = ""$check_categories nosymlink""
	: ${CC=gcc}
	: ${SIZE=: size}
	add_cppflags -DMKSH_UNEMPLOYED
	add_cppflags -DMKSH_NOPROSPECTOFWORK
	}
OSF1 {
	setvar HAVE_SIG_T = '0'	# incompatible
	add_cppflags -D_OSF_SOURCE
	add_cppflags -D_POSIX_C_SOURCE=200112L
	add_cppflags -D_XOPEN_SOURCE=600
	add_cppflags -D_XOPEN_SOURCE_EXTENDED
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
Plan9 {
	add_cppflags -D_POSIX_SOURCE
	add_cppflags -D_LIMITS_EXTENSION
	add_cppflags -D_BSD_EXTENSION
	add_cppflags -D_SUSV2_SOURCE
	add_cppflags -DMKSH_ASSUME_UTF8; setvar HAVE_ISSET_MKSH_ASSUME_UTF8 = '1'
	add_cppflags -DMKSH_NO_CMDLINE_EDITING
	add_cppflags -DMKSH__NO_SETEUGID
	setvar oswarn = '' and will currently not work''
	add_cppflags -DMKSH_UNEMPLOYED
	# this is for detecting kencc
	add_cppflags -DMKSH_MAYBE_KENCC
	}
PW32* {
	setvar HAVE_SIG_T = '0'	# incompatible
	setvar oswarn = '' and will currently not work''
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
QNX {
	add_cppflags -D__NO_EXT_QNX
	add_cppflags -D__EXT_UNIX_MISC
	case (TARGET_OSREV) {
	[012345].*|6.[0123].*|6.4.[01] {
		setvar oldish_ed = 'no-stderr-ed'		# oldish /bin/ed is broken
		}
	}
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
SCO_SV {
	case (TARGET_OSREV) {
	3.2* {
		# SCO OpenServer 5
		add_cppflags -DMKSH_UNEMPLOYED
		}
	5* {
		# SCO OpenServer 6
		}
	* {
		setvar oswarn = ''; this is an unknown version of''
		setvar oswarn = ""$oswarn$nl$TARGET_OS ${TARGET_OSREV}, please tell me what to do""
		}
	}
	add_cppflags -DMKSH_CONSERVATIVE_FDS
	: "${HAVE_SYS_SIGLIST=0}${HAVE__SYS_SIGLIST=0}"
	}
skyos {
	setvar oswarn = ""; it has minor issues""
	}
SunOS {
	add_cppflags -D_BSD_SOURCE
	add_cppflags -D__EXTENSIONS__
	}
syllable {
	add_cppflags -D_GNU_SOURCE
	add_cppflags -DMKSH_NO_SIGSUSPEND
	setvar oswarn = '' and will currently not work''
	}
ULTRIX {
	: ${CC=cc -YPOSIX}
	add_cppflags -DMKSH_TYPEDEF_SSIZE_T=int
	add_cppflags -DMKSH_CONSERVATIVE_FDS
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
UnixWare|UNIX_SV {
	# SCO UnixWare
	add_cppflags -DMKSH_CONSERVATIVE_FDS
	: "${HAVE_SYS_SIGLIST=0}${HAVE__SYS_SIGLIST=0}"
	}
UWIN* {
	setvar ccpc = ''-Yc,''
	setvar ccpl = ''-Yl,''
	setvar tsts = "" 3<>/dev/tty""
	setvar oswarn = ""; it will compile, but the target""
	setvar oswarn = ""$oswarn${nl}platform itself is very flakey/unreliable""
	: ${HAVE_SETLOCALE_CTYPE=0}
	}
_svr4 {
	# generic target for SVR4 Unix with uname -s = uname -n
	# this duplicates the * target below
	setvar oswarn = ''; it may or may not work''
	test x"$TARGET_OSREV" = x"" && setvar TARGET_OSREV = $(uname -r)
	}
* {
	setvar oswarn = ''; it may or may not work''
	test x"$TARGET_OSREV" = x"" && setvar TARGET_OSREV = $(uname -r)
	}
}

# Default HAVE_MKNOD off unless a platform case above already set it.
: ${HAVE_MKNOD=0}

# Tool fallbacks; if nroff is GNU and accepts -c, use "nroff -c" from here on.
: "${AWK=awk}${CC=cc}${NROFF=nroff}${SIZE=size}"
test 0 = $r && echo | $NROFF -v 2>&1 | grep GNU >/dev/null 2>&1 && \
    echo | $NROFF -c >/dev/null 2>&1 && setvar NROFF = ""$NROFF -c""

# this aids me in tracing FTBFSen without access to the buildd
$e "Hi from$ao $bi$srcversion$ao on:"
# Dump system/toolchain details to stderr, per target OS, for build logs.
case (TARGET_OS) {
AIX {
	vv '|' "oslevel >&2"
	vv '|' "uname -a >&2"
	}
Darwin {
	vv '|' "hwprefs machine_type os_type os_class >&2"
	vv '|' "sw_vers >&2"
	vv '|' "system_profiler SPSoftwareDataType SPHardwareDataType >&2"
	vv '|' "/bin/sh --version >&2"
	vv '|' "xcodebuild -version >&2"
	vv '|' "uname -a >&2"
	vv '|' "sysctl kern.version hw.machine hw.model hw.memsize hw.availcpu hw.cpufrequency hw.byteorder hw.cpu64bit_capable >&2"
	}
IRIX* {
	vv '|' "uname -a >&2"
	vv '|' "hinv -v >&2"
	}
OSF1 {
	vv '|' "uname -a >&2"
	vv '|' "/usr/sbin/sizer -v >&2"
	}
SCO_SV|UnixWare|UNIX_SV {
	vv '|' "uname -a >&2"
	vv '|' "uname -X >&2"
	}
* {
	vv '|' "uname -a >&2"
	}
}
# Warn (once) when building on a platform mksh has not been ported to or
# tested on; $oswarn is set by the TARGET_OS case above.
test -z $oswarn || echo >&2 "
Warning: mksh has not yet been ported to or tested on your
operating system '$TARGET_OS'$oswarn. If you can provide
a shell account to the developer, this may improve; please
drop us a success or failure notice or even send in diffs.
"
$e "$bi$me: Building the MirBSD Korn Shell$ao $ui$dstversion$ao on $TARGET_OS ${TARGET_OSREV}..."

#
# Begin of mirtoconf checks
#
$e $bi$me: Scanning for functions... please ignore any errors.$ao

#
# Compiler: which one?
#
# notes:
# - ICC defines __GNUC__ too
# - GCC defines __hpux too
# - LLVM+clang defines __GNUC__ too
# - nwcc defines __GNUC__ too
# Use the C compiler itself as the preprocessor for the probe below.
setvar CPP = ""$CC -E""
$e ... which compiler seems to be used
# The preprocessor expands exactly one ct= (compiler type) and one et=
# (environment/libc type) assignment from this source, which we then eval.
cat >conftest.c <<< '''
const char *
#if defined(__ICC) || defined(__INTEL_COMPILER)
ct="icc"
#elif defined(__xlC__) || defined(__IBMC__)
ct="xlc"
#elif defined(__SUNPRO_C)
ct="sunpro"
#elif defined(__ACK__)
ct="ack"
#elif defined(__BORLANDC__)
ct="bcc"
#elif defined(__WATCOMC__)
ct="watcom"
#elif defined(__MWERKS__)
ct="metrowerks"
#elif defined(__HP_cc)
ct="hpcc"
#elif defined(__DECC) || (defined(__osf__) && !defined(__GNUC__))
ct="dec"
#elif defined(__PGI)
ct="pgi"
#elif defined(__DMC__)
ct="dmc"
#elif defined(_MSC_VER)
ct="msc"
#elif defined(__ADSPBLACKFIN__) || defined(__ADSPTS__) || defined(__ADSP21000__)
ct="adsp"
#elif defined(__IAR_SYSTEMS_ICC__)
ct="iar"
#elif defined(SDCC)
ct="sdcc"
#elif defined(__PCC__)
ct="pcc"
#elif defined(__TenDRA__)
ct="tendra"
#elif defined(__TINYC__)
ct="tcc"
#elif defined(__llvm__) && defined(__clang__)
ct="clang"
#elif defined(__NWCC__)
ct="nwcc"
#elif defined(__GNUC__)
ct="gcc"
#elif defined(_COMPILER_VERSION)
ct="mipspro"
#elif defined(__sgi)
ct="mipspro"
#elif defined(__hpux) || defined(__hpua)
ct="hpcc"
#elif defined(__ultrix)
ct="ucode"
#elif defined(__USLC__)
ct="uslc"
#elif defined(__LCC__)
ct="lcc"
#elif defined(MKSH_MAYBE_KENCC)
/* and none of the above matches */
ct="kencc"
#else
ct="unknown"
#endif
;
const char *
#if defined(__KLIBC__) && !defined(__OS2__)
et="klibc"
#else
et="unknown"
#endif
;
'''
setvar ct = 'untested'
setvar et = 'untested'
# Preprocess; keep only the ct=/et= lines, normalise spacing, strip CRs.
vv ']' "$CPP $CFLAGS $CPPFLAGS $NOWARN conftest.c | \
    sed -n '/^ *[ce]t *= */s/^ *\([ce]t\) *= */\1=/p' | tr -d \\\\015 >x"
sed 's/^/[ /' x
# Import the detected ct/et values into this shell.
eval $(cat x)
rmf x vv.out
# Minimal program reused by the per-compiler probes below.
cat >conftest.c <<< '''
#include <unistd.h>
int main(void) { return (isatty(0)); }
'''
# Per-compiler quirks, warnings, and version dumps.
case (ct) {
ack {
	# work around "the famous ACK const bug"
	setvar CPPFLAGS = ""-Dconst= $CPPFLAGS""
	}
adsp {
	echo >&2 'Warning: Analog Devices C++ compiler for Blackfin, TigerSHARC
    and SHARC (21000) DSPs detected. This compiler has not yet
    been tested for compatibility with mksh. Continue at your
    own risk, please report success/failure to the developers.>&2 'Warning: Analog Devices C++ compiler for Blackfin, TigerSHARC
    and SHARC (21000) DSPs detected. This compiler has not yet
    been tested for compatibility with mksh. Continue at your
    own risk, please report success/failure to the developers.'
	}
bcc {
	echo >&2 "Warning: Borland C++ Builder detected. This compiler might
    produce broken executables. Continue at your own risk,
    please report success/failure to the developers.>&2 "Warning: Borland C++ Builder detected. This compiler might
    produce broken executables. Continue at your own risk,
    please report success/failure to the developers."
	}
clang {
	# does not work with current "ccc" compiler driver
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS -version"
	# one of these two works, for now
	vv '|' "${CLANG-clang} -version"
	vv '|' "${CLANG-clang} --version"
	# ensure compiler and linker are in sync unless overridden
	case (CCC_CC) {
	:* {	}
	*: {	setvar CCC_LD = "$CCC_CC"; export CCC_LD }
	}
	}
dec {
	# dump compiler and linker version information (-V)
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS -V"
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN -Wl,-V conftest.c $LIBS"
	}
dmc {
	# Digital Mars: known to be unstable under UWIN; warn only.
	echo >&2 "Warning: Digital Mars Compiler detected. When running under"
	echo >&2 "    UWIN, mksh tends to be unstable due to the limitations"
	echo >&2 "    of this platform. Continue at your own risk,"
	echo >&2 "    please report success/failure to the developers."
	}
gcc {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN -v conftest.c $LIBS"
	# print the target triplet plus "gcc<version>" on one line
	vv '|' 'echo `$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS \
	    -dumpmachine` gcc`$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN \
	    $LIBS -dumpversion`'
	}
hpcc {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN -V conftest.c $LIBS"
	}
iar {
	echo >&2 'Warning: IAR Systems (http://www.iar.com) compiler for embedded
    systems detected. This unsupported compiler has not yet
    been tested for compatibility with mksh. Continue at your
    own risk, please report success/failure to the developers.>&2 'Warning: IAR Systems (http://www.iar.com) compiler for embedded
    systems detected. This unsupported compiler has not yet
    been tested for compatibility with mksh. Continue at your
    own risk, please report success/failure to the developers.'
	}
icc {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS -V"
	}
kencc {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN -v conftest.c $LIBS"
	}
lcc {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN -v conftest.c $LIBS"
	# lcc spells the GNU inline keyword differently
	add_cppflags -D__inline__=__inline
	}
metrowerks {
	echo >&2 'Warning: Metrowerks C compiler detected. This has not yet
    been tested for compatibility with mksh. Continue at your
    own risk, please report success/failure to the developers.>&2 'Warning: Metrowerks C compiler detected. This has not yet
    been tested for compatibility with mksh. Continue at your
    own risk, please report success/failure to the developers.'
	}
mipspro {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS -version"
	}
msc {
	setvar ccpr = ''		# errorlevels are not reliable
	case (TARGET_OS) {
	Interix {
		# convert NT paths of compiler/linker to POSIX form, or fall
		# back to the bare executable names
		if [[ -n $C89_COMPILER ]] {
			setvar C89_COMPILER = $(ntpath2posix -c $C89_COMPILER)
		} else {
			setvar C89_COMPILER = 'CL.EXE'
		}
		if [[ -n $C89_LINKER ]] {
			setvar C89_LINKER = $(ntpath2posix -c $C89_LINKER)
		} else {
			setvar C89_LINKER = 'LINK.EXE'
		}
		vv '|' "$C89_COMPILER /HELP >&2"
		vv '|' "$C89_LINKER /LINK >&2"
		}
	}
	}
nwcc {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS -version"
	}
pcc {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS -v"
	}
pgi {
	echo >&2 'Warning: PGI detected. This unknown compiler has not yet
    been tested for compatibility with mksh. Continue at your
    own risk, please report success/failure to the developers.>&2 'Warning: PGI detected. This unknown compiler has not yet
    been tested for compatibility with mksh. Continue at your
    own risk, please report success/failure to the developers.'
	}
sdcc {
	echo >&2 'Warning: sdcc (http://sdcc.sourceforge.net), the small devices
    C compiler for embedded systems detected. This has not yet
    been tested for compatibility with mksh. Continue at your
    own risk, please report success/failure to the developers.>&2 'Warning: sdcc (http://sdcc.sourceforge.net), the small devices
    C compiler for embedded systems detected. This has not yet
    been tested for compatibility with mksh. Continue at your
    own risk, please report success/failure to the developers.'
	}
sunpro {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN -V conftest.c $LIBS"
	}
tcc {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS -v"
	}
tendra {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS -V 2>&1 | \
	    grep -F -i -e version -e release"
	}
ucode {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS -V"
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN -Wl,-V conftest.c $LIBS"
	}
uslc {
	case (TARGET_OS) {
	SCO_SV:3.2* {
		# SCO OpenServer 5
		setvar CFLAGS = ""$CFLAGS -g""
		: "${HAVE_CAN_OTWO=0}${HAVE_CAN_OPTIMISE=0}"
		}
	}
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN -V conftest.c $LIBS"
	}
watcom {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN -v conftest.c $LIBS"
	}
xlc {
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS -qversion"
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN $LIBS -qversion=verbose"
	vv '|' "ld -V"
	}
* {
	test x"$ct" = x"untested" && $e "!!! detecting preprocessor failed"
	setvar ct = 'unknown'
	# NOTE(review): this vv call lacks the mode argument ('|') that every
	# other vv invocation here passes — verify against upstream Build.sh.
	vv "$CC --version"
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN -v conftest.c $LIBS"
	vv '|' "$CC $CFLAGS $CPPFLAGS $LDFLAGS $NOWARN -V conftest.c $LIBS"
	}
}
# Extra version dump when linking through LLVM.
case (cm) {
dragonegg|llvm {
	vv '|' "llc -version"
	}
}
# Describe the detected environment (et) in the result message suffix.
setvar etd = "" on $et""
case (et) {
klibc {
	add_cppflags -DMKSH_NO_LIMITS
	}
unknown {
	# nothing special detected, don’t worry
	setvar etd = ''
	}
* {
	# huh?
	}
}
$e "$bi==> which compiler seems to be used...$ao $ui$ct$etd$ao"
rmf conftest.c conftest.o conftest a.out* a.exe* conftest.exe* vv.out

#
# Compiler: works as-is, with -Wno-error and -Werror
#
setvar save_NOWARN = "$NOWARN"
setvar NOWARN = ''
setvar DOWARN = ''
# The compiler must at least build a trivial program, else give up.
ac_flags 0 compiler_works '' 'if the compiler works'
test 1 = $HAVE_CAN_COMPILER_WORKS || exit 1
setvar HAVE_COMPILER_KNOWN = '0'
test $ct = unknown || setvar HAVE_COMPILER_KNOWN = '1'
# The compiler must also *fail* on bad input; some need extra linker flags
# before a failed link actually reports an error.
if ac_ifcpp 'if 0' compiler_fails '' \
    'if the compiler does not fail correctly' {
	setvar save_CFLAGS = "$CFLAGS"
	: ${HAVE_CAN_DELEXE=x}
	case (ct) {
	dec {
		setvar CFLAGS = ""$CFLAGS ${ccpl}-non_shared""
		ac_testn can_delexe compiler_fails 0 'for the -non_shared linker option' <<< """
#include <unistd.h>
int main(void) { return (isatty(0)); }
"""
		}
	dmc {
		setvar CFLAGS = ""$CFLAGS ${ccpl}/DELEXECUTABLE""
		ac_testn can_delexe compiler_fails 0 'for the /DELEXECUTABLE linker option' <<< """
#include <unistd.h>
int main(void) { return (isatty(0)); }
"""
		}
	* {
		exit 1
		}
	}
	test 1 = $HAVE_CAN_DELEXE || setvar CFLAGS = "$save_CFLAGS"
	ac_testn compiler_still_fails '' 'if the compiler still does not fail correctly' <<< """"""
	test 1 = $HAVE_COMPILER_STILL_FAILS && exit 1
}
# A preprocessor claiming __TINYC__ but unknown ct is probably tcc.
if ac_ifcpp 'ifdef __TINYC__' couldbe_tcc '!' compiler_known 0 \
    'if this could be tcc' {
	setvar ct = 'tcc'
	setvar CPP = ''cpp -D__TINYC__''
	setvar HAVE_COMPILER_KNOWN = '1'
}

# Determine per-compiler "suppress warnings" (save_NOWARN) and
# "warnings are errors" (DOWARN) flags.
case (ct) {
bcc {
	setvar save_NOWARN = ""${ccpc}-w""
	setvar DOWARN = ""${ccpc}-w!""
	}
dec {
	# -msg_* flags not used yet, or is -w2 correct?
	}
dmc {
	setvar save_NOWARN = ""${ccpc}-w""
	setvar DOWARN = ""${ccpc}-wx""
	}
hpcc {
	setvar save_NOWARN = ''
	setvar DOWARN = "+We"
	}
kencc {
	setvar save_NOWARN = ''
	setvar DOWARN = ''
	}
mipspro {
	setvar save_NOWARN = ''
	setvar DOWARN = ""-diag_error 1-10000""
	}
msc {
	setvar save_NOWARN = ""${ccpc}/w""
	setvar DOWARN = ""${ccpc}/WX""
	}
sunpro {
	test x"$save_NOWARN" = x"" && setvar save_NOWARN = ''-errwarn=%none''
	ac_flags 0 errwarnnone $save_NOWARN
	test 1 = $HAVE_CAN_ERRWARNNONE || setvar save_NOWARN = ''
	ac_flags 0 errwarnall "-errwarn=%all"
	test 1 = $HAVE_CAN_ERRWARNALL && setvar DOWARN = ""-errwarn=%all""
	}
tendra {
	setvar save_NOWARN = '-w'
	}
ucode {
	setvar save_NOWARN = ''
	setvar DOWARN = '-w2'
	}
watcom {
	setvar save_NOWARN = ''
	setvar DOWARN = "-Wc,-we"
	}
xlc {
	setvar save_NOWARN = "-qflag=i:e"
	setvar DOWARN = "-qflag=i:i"
	}
* {
	# GCC-style compilers: probe -Wno-error / -Werror support.
	test x"$save_NOWARN" = x"" && setvar save_NOWARN = '-Wno-error'
	ac_flags 0 wnoerror $save_NOWARN
	test 1 = $HAVE_CAN_WNOERROR || setvar save_NOWARN = ''
	ac_flags 0 werror -Werror
	test 1 = $HAVE_CAN_WERROR && setvar DOWARN = '-Werror'
	test $ct = icc && setvar DOWARN = ""$DOWARN -wd1419""
	}
}
setvar NOWARN = "$save_NOWARN"

#
# Compiler: extra flags (-O2 -f* -W* etc.)
#
setvar i = $(echo :"$orig_CFLAGS" | sed 's/^://' | tr -c -d $alll$allu$alln)
# optimisation: only if orig_CFLAGS is empty
test x"$i" = x"" && case (ct) {
hpcc {
	setvar phase = 'u'
	ac_flags 1 otwo +O2
	setvar phase = 'x'
	}
kencc|tcc|tendra {
	# no special optimisation
	}
sunpro {
	# pad the test program so optimisation has something to chew on
	cat >x <<< '''
#include <unistd.h>
int main(void) { return (isatty(0)); }
#define __IDSTRING_CONCAT(l,p)	__LINTED__ ## l ## _ ## p
#define __IDSTRING_EXPAND(l,p)	__IDSTRING_CONCAT(l,p)
#define pad			void __IDSTRING_EXPAND(__LINE__,x)(void) { }
'''
	yes pad | head -n 256 >>x
	ac_flags - 1 otwo -xO2 <x
	rmf x
	}
xlc {
	ac_flags 1 othree "-O3 -qstrict"
	test 1 = $HAVE_CAN_OTHREE || ac_flags 1 otwo -O2
	}
* {
	ac_flags 1 otwo -O2
	test 1 = $HAVE_CAN_OTWO || ac_flags 1 optimise -O
	}
}
# other flags: just add them if they are supported
# i=1 marks compilers that also get the common -Wall/-fwrapv probes below.
setvar i = '0'
case (ct) {
bcc {
	ac_flags 1 strpool "${ccpc}-d" 'if string pooling can be enabled'
	}
clang {
	setvar i = '1'
	}
dec {
	ac_flags 0 verb -verbose
	ac_flags 1 rodata -readonly_strings
	}
dmc {
	ac_flags 1 decl "${ccpc}-r" 'for strict prototype checks'
	ac_flags 1 schk "${ccpc}-s" 'for stack overflow checking'
	}
gcc {
	# The following tests run with -Werror (gcc only) if possible
	setvar NOWARN = "$DOWARN"; setvar phase = 'u'
	ac_flags 1 wnodeprecateddecls -Wno-deprecated-declarations
	# mksh is not written in CFrustFrust!
	ac_flags 1 no_eh_frame -fno-asynchronous-unwind-tables
	ac_flags 1 fnostrictaliasing -fno-strict-aliasing
	ac_flags 1 fstackprotectorstrong -fstack-protector-strong
	test 1 = $HAVE_CAN_FSTACKPROTECTORSTRONG || \
	    ac_flags 1 fstackprotectorall -fstack-protector-all
	# NOTE(review): this 'case{' has no subject word (upstream matches
	# against " $CFLAGS ") — looks like a translation artifact; confirm.
	test $cm = dragonegg && case{
	*\ -fplugin=*dragonegg* { }
	* { ac_flags 1 fplugin_dragonegg -fplugin=dragonegg }
	}
	case (cm) {
	combine {
		setvar fv = '0'
		setvar checks = ''7 8''
		}
	lto {
		setvar fv = '0'
		setvar checks = ''1 2 3 4 5 6 7 8''
		}
	* {
		setvar fv = '1'
		}
	}
	# try LTO flag combinations in order until one is accepted (fv=1)
	test $fv = 1 || for what in $checks {
		test $fv = 1 && break
		case (what) {
		1 {	setvar t_cflags = ''-flto=jobserver''
			setvar t_ldflags = ''-fuse-linker-plugin''
			setvar t_use = '1', t_name = 'fltojs_lp' }
		2 {	setvar t_cflags = ''-flto=jobserver',' t_ldflags = ''''
			setvar t_use = '1', t_name = 'fltojs_nn' }
		3 {	setvar t_cflags = ''-flto=jobserver''
			setvar t_ldflags = ''-fno-use-linker-plugin -fwhole-program''
			setvar t_use = '1', t_name = 'fltojs_np' }
		4 {	setvar t_cflags = ''-flto''
			setvar t_ldflags = ''-fuse-linker-plugin''
			setvar t_use = '1', t_name = 'fltons_lp' }
		5 {	setvar t_cflags = ''-flto',' t_ldflags = ''''
			setvar t_use = '1', t_name = 'fltons_nn' }
		6 {	setvar t_cflags = ''-flto''
			setvar t_ldflags = ''-fno-use-linker-plugin -fwhole-program''
			setvar t_use = '1', t_name = 'fltons_np' }
		7 {	setvar t_cflags = ''-fwhole-program --combine',' t_ldflags = ''''
			setvar t_use = '0', t_name = 'combine', cm = 'combine' }
		8 {	setvar fv = '1', cm = 'normal' }
		}
		test $fv = 1 && break
		ac_flags $t_use $t_name $t_cflags \
		    "if gcc supports $t_cflags $t_ldflags" $t_ldflags
	}
	setvar i = '1'
	}
hpcc {
	setvar phase = 'u'
	# probably not needed
	#ac_flags 1 agcc -Agcc 'for support of GCC extensions'
	setvar phase = 'x'
	}
icc {
	ac_flags 1 fnobuiltinsetmode -fno-builtin-setmode
	ac_flags 1 fnostrictaliasing -fno-strict-aliasing
	ac_flags 1 fstacksecuritycheck -fstack-security-check
	setvar i = '1'
	}
mipspro {
	ac_flags 1 fullwarn -fullwarn 'for remark output support'
	}
msc {
	ac_flags 1 strpool "${ccpc}/GF" 'if string pooling can be enabled'
	echo 'int main(void) { char test[64] = ""; return (*test); }' >x
	ac_flags - 1 stackon "${ccpc}/GZ" 'if stack checks can be enabled' <x
	ac_flags - 1 stckall "${ccpc}/Ge" 'stack checks for all functions' <x
	ac_flags - 1 secuchk "${ccpc}/GS" 'for compiler security checks' <x
	rmf x
	ac_flags 1 wall "${ccpc}/Wall" 'to enable all warnings'
	ac_flags 1 wp64 "${ccpc}/Wp64" 'to enable 64-bit warnings'
	}
nwcc {
	setvar i = '1'
	#broken# ac_flags 1 ssp -stackprotect
	}
sunpro {
	setvar phase = 'u'
	ac_flags 1 v -v
	ac_flags 1 ipo -xipo 'for cross-module optimisation'
	setvar phase = 'x'
	}
tcc {
	: #broken# ac_flags 1 boundschk -b
	}
tendra {
	ac_flags 0 ysystem -Ysystem
	test 1 = $HAVE_CAN_YSYSTEM && setvar CPPFLAGS = ""-Ysystem $CPPFLAGS""
	ac_flags 1 extansi -Xa
	}
xlc {
	ac_flags 1 rodata "-qro -qroconst -qroptr"
	ac_flags 1 rtcheck -qcheck=all
	#ac_flags 1 rtchkc -qextchk	# reported broken
	ac_flags 1 wformat "-qformat=all -qformat=nozln"
	#ac_flags 1 wp64 -qwarn64	# too verbose for now
	}
}
# flags common to a subset of compilers (run with -Werror on gcc)
if test 1 = $i {
	ac_flags 1 wall -Wall
	ac_flags 1 fwrapv -fwrapv
}

setvar phase = 'x'
# The following tests run with -Werror or similar (all compilers) if possible
setvar NOWARN = "$DOWARN"
test $ct = pcc && setvar phase = 'u'

#
# Compiler: check for stuff that only generates warnings
#
# Each test force-fails on TenDRA and gcc 1.x, which accept the attribute
# syntax without honouring it (false positives).
ac_test attribute_bounded '' 'for __attribute__((__bounded__))' <<< '''
#if defined(__TenDRA__) || (defined(__GNUC__) && (__GNUC__ < 2))
extern int thiswillneverbedefinedIhope(void);
/* force a failure: TenDRA and gcc 1.42 have false positive here */
int main(void) { return (thiswillneverbedefinedIhope()); }
#else
#include <string.h>
#undef __attribute__
int xcopy(const void *, void *, size_t)
    __attribute__((__bounded__(__buffer__, 1, 3)))
    __attribute__((__bounded__(__buffer__, 2, 3)));
int main(int ac, char *av[]) { return (xcopy(av[0], av[--ac], 1)); }
int xcopy(const void *s, void *d, size_t n) {
/*
 * if memmove does not exist, we are not on a system
 * with GCC with __bounded__ attribute either so poo
 */
memmove(d, s, n); return ((int)n);
}
#endif
'''
ac_test attribute_format '' 'for __attribute__((__format__))' <<< '''
#if defined(__TenDRA__) || (defined(__GNUC__) && (__GNUC__ < 2))
extern int thiswillneverbedefinedIhope(void);
/* force a failure: TenDRA and gcc 1.42 have false positive here */
int main(void) { return (thiswillneverbedefinedIhope()); }
#else
#define fprintf printfoo
#include <stdio.h>
#undef __attribute__
#undef fprintf
extern int fprintf(FILE *, const char *format, ...)
    __attribute__((__format__(__printf__, 2, 3)));
int main(int ac, char **av) { return (fprintf(stderr, "%s%d", *av, ac)); }
#endif
'''
ac_test attribute_noreturn '' 'for __attribute__((__noreturn__))' <<< '''
#if defined(__TenDRA__) || (defined(__GNUC__) && (__GNUC__ < 2))
extern int thiswillneverbedefinedIhope(void);
/* force a failure: TenDRA and gcc 1.42 have false positive here */
int main(void) { return (thiswillneverbedefinedIhope()); }
#else
#include <stdlib.h>
#undef __attribute__
void fnord(void) __attribute__((__noreturn__));
int main(void) { fnord(); }
void fnord(void) { exit(0); }
#endif
'''
ac_test attribute_pure '' 'for __attribute__((__pure__))' <<< '''
#if defined(__TenDRA__) || (defined(__GNUC__) && (__GNUC__ < 2))
extern int thiswillneverbedefinedIhope(void);
/* force a failure: TenDRA and gcc 1.42 have false positive here */
int main(void) { return (thiswillneverbedefinedIhope()); }
#else
#include <unistd.h>
#undef __attribute__
int foo(const char *) __attribute__((__pure__));
int main(int ac, char **av) { return (foo(av[ac - 1]) + isatty(0)); }
int foo(const char *s) { return ((int)s[0]); }
#endif
'''
ac_test attribute_unused '' 'for __attribute__((__unused__))' <<< '''
#if defined(__TenDRA__) || (defined(__GNUC__) && (__GNUC__ < 2))
extern int thiswillneverbedefinedIhope(void);
/* force a failure: TenDRA and gcc 1.42 have false positive here */
int main(void) { return (thiswillneverbedefinedIhope()); }
#else
#include <unistd.h>
#undef __attribute__
int main(int ac __attribute__((__unused__)), char **av
    __attribute__((__unused__))) { return (isatty(0)); }
#endif
'''
ac_test attribute_used '' 'for __attribute__((__used__))' <<< '''
#if defined(__TenDRA__) || (defined(__GNUC__) && (__GNUC__ < 2))
extern int thiswillneverbedefinedIhope(void);
/* force a failure: TenDRA and gcc 1.42 have false positive here */
int main(void) { return (thiswillneverbedefinedIhope()); }
#else
#include <unistd.h>
#undef __attribute__
static const char fnord[] __attribute__((__used__)) = "42";
int main(void) { return (isatty(0)); }
#endif
'''

# End of tests run with -Werror
setvar NOWARN = "$save_NOWARN"
setvar phase = 'x'

#
# mksh: flavours (full/small mksh, omit certain stuff)
#
# Each check inspects the effective CPP defines and adjusts test categories.
if ac_ifcpp 'ifdef MKSH_SMALL' isset_MKSH_SMALL '' \
    "if a reduced-feature mksh is requested" {
	: ${HAVE_NICE=0}
	: ${HAVE_PERSISTENT_HISTORY=0}
	setvar check_categories = ""$check_categories smksh""
	setvar HAVE_ISSET_MKSH_CONSERVATIVE_FDS = '1'	# from sh.h
}
ac_ifcpp 'if defined(MKSH_BINSHPOSIX) || defined(MKSH_BINSHREDUCED)' \
    isset_MKSH_BINSH '' 'if invoking as sh should be handled specially' && \
    setvar check_categories = ""$check_categories binsh""
ac_ifcpp 'ifdef MKSH_UNEMPLOYED' isset_MKSH_UNEMPLOYED '' \
    "if mksh will be built without job control" && \
    setvar check_categories = ""$check_categories arge""
ac_ifcpp 'ifdef MKSH_NOPROSPECTOFWORK' isset_MKSH_NOPROSPECTOFWORK '' \
    "if mksh will be built without job signals" && \
    setvar check_categories = ""$check_categories arge nojsig""
ac_ifcpp 'ifdef MKSH_ASSUME_UTF8' isset_MKSH_ASSUME_UTF8 '' \
    'if the default UTF-8 mode is specified' && : ${HAVE_SETLOCALE_CTYPE=0}
ac_ifcpp 'ifdef MKSH_CONSERVATIVE_FDS' isset_MKSH_CONSERVATIVE_FDS '' \
    'if traditional/conservative fd use is requested' && \
    setvar check_categories = ""$check_categories convfds""
#ac_ifcpp 'ifdef MKSH_DISABLE_DEPRECATED' isset_MKSH_DISABLE_DEPRECATED '' \
#    "if deprecated features are to be omitted" && \
#    check_categories="$check_categories nodeprecated"
#ac_ifcpp 'ifdef MKSH_DISABLE_EXPERIMENTAL' isset_MKSH_DISABLE_EXPERIMENTAL '' \
#    "if experimental features are to be omitted" && \
#    check_categories="$check_categories noexperimental"
ac_ifcpp 'ifdef MKSH_MIDNIGHTBSD01ASH_COMPAT' isset_MKSH_MIDNIGHTBSD01ASH_COMPAT '' \
    'if the MidnightBSD 0.1 ash compatibility mode is requested' && \
    setvar check_categories = ""$check_categories mnbsdash""

#
# Environment: headers
#
# ac_header args after the header name are prerequisite includes.
ac_header sys/time.h sys/types.h
ac_header time.h sys/types.h
test "11" = "$HAVE_SYS_TIME_H$HAVE_TIME_H" || setvar HAVE_BOTH_TIME_H = '0'
ac_test both_time_h '' 'whether <sys/time.h> and <time.h> can both be included' <<< '''
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
int main(void) { struct tm tm; return ((int)sizeof(tm) + isatty(0)); }
'''
ac_header sys/bsdtypes.h
ac_header sys/file.h sys/types.h
ac_header sys/mkdev.h sys/types.h
ac_header sys/mman.h sys/types.h
ac_header sys/param.h
ac_header sys/resource.h sys/types.h _time
ac_header sys/select.h sys/types.h
ac_header sys/sysmacros.h
ac_header bstring.h
ac_header grp.h sys/types.h
ac_header io.h
ac_header libgen.h
ac_header libutil.h sys/types.h
ac_header paths.h
ac_header stdint.h stdarg.h
# include strings.h only if compatible with string.h
ac_header strings.h sys/types.h string.h
ac_header termios.h
ac_header ulimit.h sys/types.h
ac_header values.h

#
# Environment: definitions
#
# Large file support: try plain, then -D_FILE_OFFSET_BITS=64 (SUS),
# then -D_LARGE_FILES=1 (AIX); keep whichever first works.
echo '#include <sys/types.h>
#include <unistd.h>
/* check that off_t can represent 2^63-1 correctly, thx FSF */
#define LARGE_OFF_T ((((off_t)1 << 31) << 31) - 1 + (((off_t)1 << 31) << 31))
int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 &&
    LARGE_OFF_T % 2147483647 == 1) ? 1 : -1];
int main(void) { return (isatty(0)); }' >lft.c
ac_testn can_lfs '' "for large file support" <lft.c
setvar save_CPPFLAGS = "$CPPFLAGS"
add_cppflags -D_FILE_OFFSET_BITS=64
ac_testn can_lfs_sus '!' can_lfs 0 "... with -D_FILE_OFFSET_BITS=64" <lft.c
if test 0 = $HAVE_CAN_LFS_SUS {
	setvar CPPFLAGS = "$save_CPPFLAGS"
	add_cppflags -D_LARGE_FILES=1
	ac_testn can_lfs_aix '!' can_lfs 0 "... with -D_LARGE_FILES=1" <lft.c
	test 1 = $HAVE_CAN_LFS_AIX || setvar CPPFLAGS = "$save_CPPFLAGS"
}
rm -f lft.c
rmf lft*	# end of large file support test

#
# Environment: types
#
ac_test can_inttypes '!' stdint_h 1 "for standard 32-bit integer types" <<< '''
#include <sys/types.h>
#include <stddef.h>
int main(int ac, char **av) { return ((uint32_t)(size_t)*av + (int32_t)ac); }
'''
ac_test can_ucbints '!' can_inttypes 1 "for UCB 32-bit integer types" <<< '''
#include <sys/types.h>
#include <stddef.h>
int main(int ac, char **av) { return ((u_int32_t)(size_t)*av + (int32_t)ac); }
'''
ac_test can_int8type '!' stdint_h 1 "for standard 8-bit integer type" <<< '''
#include <sys/types.h>
#include <stddef.h>
int main(int ac, char **av) { return ((uint8_t)(size_t)av[ac]); }
'''
ac_test can_ucbint8 '!' can_int8type 1 "for UCB 8-bit integer type" <<< '''
#include <sys/types.h>
#include <stddef.h>
int main(int ac, char **av) { return ((u_int8_t)(size_t)av[ac]); }
'''

ac_test rlim_t <<< '''
#include <sys/types.h>
#if HAVE_BOTH_TIME_H
#include <sys/time.h>
#include <time.h>
#elif HAVE_SYS_TIME_H
#include <sys/time.h>
#elif HAVE_TIME_H
#include <time.h>
#endif
#if HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#include <unistd.h>
int main(void) { return (((int)(rlim_t)0) + isatty(0)); }
'''

# only testn: added later below
# Find a usable signal-handler type: sig_t, else sighandler_t, else
# __sighandler_t, mapping the winner to sig_t via cpp.
ac_testn sig_t <<< '''
#include <sys/types.h>
#include <signal.h>
#include <stddef.h>
volatile sig_t foo = (sig_t)0;
int main(void) { return (foo == (sig_t)0); }
'''

ac_testn sighandler_t '!' sig_t 0 <<< '''
#include <sys/types.h>
#include <signal.h>
#include <stddef.h>
volatile sighandler_t foo = (sighandler_t)0;
int main(void) { return (foo == (sighandler_t)0); }
'''
if test 1 = $HAVE_SIGHANDLER_T {
	add_cppflags -Dsig_t=sighandler_t
	setvar HAVE_SIG_T = '1'
}

ac_testn __sighandler_t '!' sig_t 0 <<< '''
#include <sys/types.h>
#include <signal.h>
#include <stddef.h>
volatile __sighandler_t foo = (__sighandler_t)0;
int main(void) { return (foo == (__sighandler_t)0); }
'''
if test 1 = $HAVE___SIGHANDLER_T {
	add_cppflags -Dsig_t=__sighandler_t
	setvar HAVE_SIG_T = '1'
}

test 1 = $HAVE_SIG_T || add_cppflags -Dsig_t=nosig_t
ac_cppflags SIG_T

#
# check whether whatever we use for the final link will succeed
#
if test $cm = makefile {
	: nothing to check
} else {
	setvar HAVE_LINK_WORKS = 'x'
	ac_testinit link_works '' 'checking if the final link command may succeed'
	setvar fv = '1'
	# NOTE(review): the printf line below contains '\'n where upstream has
	# a \n escape — looks like a translation/quoting artifact; confirm.
	cat >conftest.c <<< """
#define EXTERN
#define MKSH_INCLUDES_ONLY
#include "sh.h"
__RCSID("$srcversion");
int main(void) { printf("Hello, World!'\'n"); return (isatty(0)); }
"""
	# Per link mode (cm), run the same pipeline the real build will use.
	case (cm) {
	llvm {
		v "$CC $CFLAGS $CPPFLAGS $NOWARN -emit-llvm -c conftest.c" || setvar fv = '0'
		rmf $tfn.s
		test $fv = 0 || v "llvm-link -o - conftest.o | opt $optflags | llc -o $tfn.s" || setvar fv = '0'
		test $fv = 0 || v "$CC $CFLAGS $LDFLAGS -o $tcfn $tfn.s $LIBS $ccpr"
		}
	dragonegg {
		v "$CC $CFLAGS $CPPFLAGS $NOWARN -S -flto conftest.c" || setvar fv = '0'
		test $fv = 0 || v "mv conftest.s conftest.ll"
		test $fv = 0 || v "llvm-as conftest.ll" || setvar fv = '0'
		rmf $tfn.s
		test $fv = 0 || v "llvm-link -o - conftest.bc | opt $optflags | llc -o $tfn.s" || setvar fv = '0'
		test $fv = 0 || v "$CC $CFLAGS $LDFLAGS -o $tcfn $tfn.s $LIBS $ccpr"
		}
	combine {
		v "$CC $CFLAGS $CPPFLAGS $LDFLAGS -fwhole-program --combine $NOWARN -o $tcfn conftest.c $LIBS $ccpr"
		}
	lto|normal {
		setvar cm = 'normal'
		v "$CC $CFLAGS $CPPFLAGS $NOWARN -c conftest.c" || setvar fv = '0'
		test $fv = 0 || v "$CC $CFLAGS $LDFLAGS -o $tcfn conftest.o $LIBS $ccpr"
		}
	}
	test -f $tcfn || setvar fv = '0'
	ac_testdone
	test $fv = 1 || exit 1
}

#
# Environment: errors and signals
#
test x"NetBSD" = x"$TARGET_OS" && $e Ignore the compatibility warning.

# Prefer sys_errlist/sys_nerr; fall back to the underscore-prefixed names,
# mapping them via cpp when only those exist.
ac_testn sys_errlist '' "the sys_errlist[] array and sys_nerr" <<< '''
extern const int sys_nerr;
extern const char * const sys_errlist[];
extern int isatty(int);
int main(void) { return (*sys_errlist[sys_nerr - 1] + isatty(0)); }
'''
ac_testn _sys_errlist '!' sys_errlist 0 "the _sys_errlist[] array and _sys_nerr" <<< '''
extern const int _sys_nerr;
extern const char * const _sys_errlist[];
extern int isatty(int);
int main(void) { return (*_sys_errlist[_sys_nerr - 1] + isatty(0)); }
'''
if test 1 = $HAVE__SYS_ERRLIST {
	add_cppflags -Dsys_nerr=_sys_nerr
	add_cppflags -Dsys_errlist=_sys_errlist
	setvar HAVE_SYS_ERRLIST = '1'
}
ac_cppflags SYS_ERRLIST

# Same dance for sys_signame[] and sys_siglist[].
for what in name list {
	setvar uwhat = $(upper $what)
	ac_testn sys_sig$what '' "the sys_sig${what}[] array" <<< """
extern const char * const sys_sig${what}[];
extern int isatty(int);
int main(void) { return (sys_sig${what}[0][0] + isatty(0)); }
"""
	ac_testn _sys_sig$what '!' sys_sig$what 0 "the _sys_sig${what}[] array" <<< """
extern const char * const _sys_sig${what}[];
extern int isatty(int);
int main(void) { return (_sys_sig${what}[0][0] + isatty(0)); }
"""
	eval uwhat_v='$'HAVE__SYS_SIG$uwhat
	if test 1 = $uwhat_v {
		add_cppflags -Dsys_sig$what=_sys_sig$what
		eval HAVE_SYS_SIG$uwhat=1
	}
	ac_cppflags SYS_SIG$uwhat
}

#
# Environment: library functions
#
ac_test flock <<< '''
#include <sys/types.h>
#include <fcntl.h>
#undef flock
#if HAVE_SYS_FILE_H
#include <sys/file.h>
#endif
int main(void) { return (flock(0, LOCK_EX | LOCK_UN)); }
'''

# fcntl locking is only probed when flock() is absent.
ac_test lock_fcntl '!' flock 1 'whether we can lock files with fcntl' <<< '''
#include <fcntl.h>
#undef flock
int main(void) {
struct flock lks;
lks.l_type = F_WRLCK | F_UNLCK;
return (fcntl(0, F_SETLKW, &lks));
}
'''

ac_test getrusage <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
int main(void) {
struct rusage ru;
return (getrusage(RUSAGE_SELF, &ru) +
    getrusage(RUSAGE_CHILDREN, &ru));
}
'''

ac_test getsid <<< '''
#include <unistd.h>
int main(void) { return ((int)getsid(0)); }
'''

ac_test gettimeofday <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
int main(void) { struct timeval tv; return (gettimeofday(&tv, NULL)); }
'''

ac_test killpg <<< '''
#include <signal.h>
int main(int ac, char *av[]) { return (av[0][killpg(123, ac)]); }
'''

ac_test memmove <<< '''
#include <sys/types.h>
#include <stddef.h>
#include <string.h>
#if HAVE_STRINGS_H
#include <strings.h>
#endif
int main(int ac, char *av[]) {
return (*(int *)(void *)memmove(av[0], av[1], (size_t)ac));
}
'''

ac_test mknod '' 'if to use mknod(), makedev() and friends' <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
int main(int ac, char *av[]) {
dev_t dv;
dv = makedev((unsigned int)ac, (unsigned int)av[0][0]);
return (mknod(av[0], (mode_t)0, dv) ? (int)major(dv) :
    (int)minor(dv));
}
'''

# mmap is only useful here when fcntl locking is available.
ac_test mmap lock_fcntl 0 'for mmap and munmap' <<< '''
#include <sys/types.h>
#if HAVE_SYS_FILE_H
#include <sys/file.h>
#endif
#if HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <stddef.h>
#include <stdlib.h>
int main(void) { return ((void *)mmap(NULL, (size_t)0,
    PROT_READ, MAP_PRIVATE, 0, (off_t)0) == (void *)NULL ? 1 :
    munmap(NULL, 0)); }
'''

ac_test nice <<< '''
#include <unistd.h>
int main(void) { return (nice(4)); }
'''

ac_test revoke <<< '''
#include <sys/types.h>
#if HAVE_LIBUTIL_H
#include <libutil.h>
#endif
#include <unistd.h>
int main(int ac, char *av[]) { return (ac + revoke(av[0])); }
'''

ac_test setlocale_ctype '' 'setlocale(LC_CTYPE, "")' <<< '''
#include <locale.h>
#include <stddef.h>
int main(void) { return ((int)(size_t)(void *)setlocale(LC_CTYPE, "")); }
'''

ac_test langinfo_codeset setlocale_ctype 0 'nl_langinfo(CODESET)' <<< '''
#include <langinfo.h>
#include <stddef.h>
int main(void) { return ((int)(size_t)(void *)nl_langinfo(CODESET)); }
'''

ac_test select <<< '''
#include <sys/types.h>
#if HAVE_BOTH_TIME_H
#include <sys/time.h>
#include <time.h>
#elif HAVE_SYS_TIME_H
#include <sys/time.h>
#elif HAVE_TIME_H
#include <time.h>
#endif
#if HAVE_SYS_BSDTYPES_H
#include <sys/bsdtypes.h>
#endif
#if HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#if HAVE_BSTRING_H
#include <bstring.h>
#endif
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#if HAVE_STRINGS_H
#include <strings.h>
#endif
#include <unistd.h>
int main(void) {
struct timeval tv = { 1, 200000 };
fd_set fds; FD_ZERO(&fds); FD_SET(0, &fds);
return (select(FD_SETSIZE, &fds, NULL, NULL, &tv));
}
'''

ac_test setresugid <<< '''
#include <sys/types.h>
#include <unistd.h>
int main(void) { return (setresuid(0,0,0) + setresgid(0,0,0)); }
'''

ac_test setgroups setresugid 0 <<< '''
#include <sys/types.h>
#if HAVE_GRP_H
#include <grp.h>
#endif
#include <unistd.h>
int main(void) { gid_t gid = 0; return (setgroups(0, &gid)); }
'''

if test x"$et" = x"klibc" {

	ac_testn __rt_sigsuspend '' 'whether klibc uses RT signals' <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
extern int __rt_sigsuspend(const sigset_t *, size_t);
int main(void) { return (__rt_sigsuspend(NULL, 0)); }
'''

	# no? damn! legacy crap ahead!

	ac_testn __sigsuspend_s '!' __rt_sigsuspend 1 \
	    'whether sigsuspend is usable (1/2)' <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
extern int __sigsuspend_s(sigset_t);
int main(void) { return (__sigsuspend_s(0)); }
'''
	ac_testn __sigsuspend_xxs '!' __sigsuspend_s 1 \
	    'whether sigsuspend is usable (2/2)' <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
extern int __sigsuspend_xxs(int, int, sigset_t);
int main(void) { return (__sigsuspend_xxs(0, 0, 0)); }
'''

	if test "000" = "$HAVE___RT_SIGSUSPEND$HAVE___SIGSUSPEND_S$HAVE___SIGSUSPEND_XXS" {
		# no usable sigsuspend(), use pause() *ugh*
		add_cppflags -DMKSH_NO_SIGSUSPEND
	}
}

ac_test strerror '!' sys_errlist 0 <<< '''
extern char *strerror(int);
int main(int ac, char *av[]) { return (*strerror(*av[ac])); }
'''

ac_test strsignal '!' sys_siglist 0 <<< '''
#include <string.h>
#include <signal.h>
int main(void) { return (strsignal(1)[0]); }
'''

ac_test strlcpy <<< '''
#include <string.h>
int main(int ac, char *av[]) { return (strlcpy(*av, av[1],
    (size_t)ac)); }
'''

#
# check headers for declarations
#
# Unlike the link tests above, these compile against the full sh.h
# environment to verify the symbols are actually *declared* by the system
# headers, not merely present in libc.
ac_test flock_decl flock 1 'for declaration of flock()' <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
#if HAVE_SYS_FILE_H
#include <sys/file.h>
#endif
int main(void) { return ((flock)(0, 0)); }
'''
ac_test revoke_decl revoke 1 'for declaration of revoke()' <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
int main(void) { return ((revoke)("")); }
'''
ac_test sys_errlist_decl sys_errlist 0 "for declaration of sys_errlist[] and sys_nerr" <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
int main(void) { return (*sys_errlist[sys_nerr - 1] + isatty(0)); }
'''
ac_test sys_siglist_decl sys_siglist 0 'for declaration of sys_siglist[]' <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
int main(void) { return (sys_siglist[0][0] + isatty(0)); }
'''

#
# other checks
#
# Persistent history: enabled when the cached value allows it and the
# HAVE_MMAP pattern matches (the 11*/101 patterns look like a concatenation
# of several feature flags -- TODO confirm against the ac_cache machinery).
setvar fd = ''if to use persistent history''
ac_cache PERSISTENT_HISTORY || case (HAVE_MMAP) {
11*|101 { setvar fv = '1' }
}
# Without persistent history, exclude the histfile regression tests.
test 1 = $fv || setvar check_categories = ""$check_categories no-histfile""
ac_testdone
ac_cppflags

# Compile-time assertions: the C snippet encodes each invariant as a
# negative-size-array trick (cta macro); CFLAGS is saved/restored around
# the test. The build aborts below if any assertion fails.
setvar save_CFLAGS = "$CFLAGS"
ac_testn compile_time_asserts_$$ '' 'whether compile-time assertions pass' <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
#ifndef CHAR_BIT
#define CHAR_BIT 8	/* defuse this test on really legacy systems */
#endif
struct ctasserts {
#define cta(name, assertion) char name[(assertion) ? 1 : -1]
/* this one should be defined by the standard */
cta(char_is_1_char, (sizeof(char) == 1) && (sizeof(signed char) == 1) &&
    (sizeof(unsigned char) == 1));
cta(char_is_8_bits, ((CHAR_BIT) == 8) && ((int)(unsigned char)0xFF == 0xFF) &&
    ((int)(unsigned char)0x100 == 0) && ((int)(unsigned char)(int)-1 == 0xFF));
/* the next assertion is probably not really needed */
cta(short_is_2_char, sizeof(short) == 2);
cta(short_size_no_matter_of_signedness, sizeof(short) == sizeof(unsigned short));
/* the next assertion is probably not really needed */
cta(int_is_4_char, sizeof(int) == 4);
cta(int_size_no_matter_of_signedness, sizeof(int) == sizeof(unsigned int));

cta(long_ge_int, sizeof(long) >= sizeof(int));
cta(long_size_no_matter_of_signedness, sizeof(long) == sizeof(unsigned long));

#ifndef MKSH_LEGACY_MODE
/* the next assertion is probably not really needed */
cta(ari_is_4_char, sizeof(mksh_ari_t) == 4);
/* but this is */
cta(ari_has_31_bit, 0 < (mksh_ari_t)(((((mksh_ari_t)1 << 15) << 15) - 1) * 2 + 1));
/* the next assertion is probably not really needed */
cta(uari_is_4_char, sizeof(mksh_uari_t) == 4);
/* but the next three are; we REQUIRE unsigned integer wraparound */
cta(uari_has_31_bit, 0 < (mksh_uari_t)(((((mksh_uari_t)1 << 15) << 15) - 1) * 2 + 1));
cta(uari_has_32_bit, 0 < (mksh_uari_t)(((((mksh_uari_t)1 << 15) << 15) - 1) * 4 + 3));
cta(uari_wrap_32_bit,
    (mksh_uari_t)(((((mksh_uari_t)1 << 15) << 15) - 1) * 4 + 3) >
    (mksh_uari_t)(((((mksh_uari_t)1 << 15) << 15) - 1) * 4 + 4));
#define NUM 22
#else
#define NUM 16
#endif
/* these are always required */
cta(ari_is_signed, (mksh_ari_t)-1 < (mksh_ari_t)0);
cta(uari_is_unsigned, (mksh_uari_t)-1 > (mksh_uari_t)0);
/* we require these to have the precisely same size and assume 2s complement */
cta(ari_size_no_matter_of_signedness, sizeof(mksh_ari_t) == sizeof(mksh_uari_t));

cta(sizet_size_no_matter_of_signedness, sizeof(ssize_t) == sizeof(size_t));
cta(sizet_voidptr_same_size, sizeof(size_t) == sizeof(void *));
cta(sizet_funcptr_same_size, sizeof(size_t) == sizeof(void (*)(void)));
/* our formatting routines assume this */
cta(ptr_fits_in_long, sizeof(size_t) <= sizeof(long));
cta(ari_fits_in_long, sizeof(mksh_ari_t) <= sizeof(long));
/* for struct alignment people */
char padding[64 - NUM];
};
char ctasserts_dblcheck[sizeof(struct ctasserts) == 64 ? 1 : -1];
int main(void) { return (sizeof(ctasserts_dblcheck) + isatty(0)); }
'''
setvar CFLAGS = "$save_CFLAGS"
# Hard stop: refuse to build on a platform that violates the assertions.
# ($$ is in the variable name because the test name above embeds the PID.)
eval test 1 = '$'HAVE_COMPILE_TIME_ASSERTS_$$ || exit 1

#
# extra checks for legacy mksh
#
# Only for legacy (lksh) builds: determine the width of 'long' with the
# same cta() negative-array trick, then tag the regression-test categories
# accordingly (int:32 / int:64 / int:u for unknown).
if test $legacy = 1 {
	ac_test long_32bit '' 'whether long is 32 bit wide' <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
#ifndef CHAR_BIT
#define CHAR_BIT 0
#endif
struct ctasserts {
#define cta(name, assertion) char name[(assertion) ? 1 : -1]
cta(char_is_8_bits, (CHAR_BIT) == 8);
cta(long_is_32_bits, sizeof(long) == 4);
};
int main(void) { return (sizeof(struct ctasserts)); }
'''

	ac_test long_64bit '!' long_32bit 0 'whether long is 64 bit wide' <<< '''
#define MKSH_INCLUDES_ONLY
#include "sh.h"
#ifndef CHAR_BIT
#define CHAR_BIT 0
#endif
struct ctasserts {
#define cta(name, assertion) char name[(assertion) ? 1 : -1]
cta(char_is_8_bits, (CHAR_BIT) == 8);
cta(long_is_64_bits, sizeof(long) == 8);
};
int main(void) { return (sizeof(struct ctasserts)); }
'''

	# The case key appears to combine both HAVE_ results -- TODO confirm.
	case (HAVE_LONG_32BIT) {
	10 { setvar check_categories = ""$check_categories int:32"" }
	01 { setvar check_categories = ""$check_categories int:64"" }
	* { setvar check_categories = ""$check_categories int:u"" }
	}
}

#
# Compiler: Praeprocessor (only if needed)
#
# Only when sys_signame[] is missing: check whether the preprocessor
# supports -dD (dump macro definitions), which the signal-name fallback
# below uses to harvest SIG* names.
test 0 = $HAVE_SYS_SIGNAME && if ac_testinit cpp_dd '' \
    'checking if the C Preprocessor supports -dD' {
	echo '#define foo bar' >conftest.c
	vv ']' "$CPP $CFLAGS $CPPFLAGS $NOWARN -dD conftest.c >x"
	grep '#define foo bar' x >/dev/null 2>&1 && setvar fv = '1'
	rmf conftest.c x vv.out
	ac_testdone
}

#
# End of mirtoconf checks
#
$e ... done.

# Some operating systems have ancient versions of ed(1) writing
# the character count to standard output; cope for that
echo wq >x
ed x <x 2>/dev/null | grep 3 >/dev/null 2>&1 && \
    setvar check_categories = ""$check_categories $oldish_ed""
rmf x vv.out

# Fallback signal-name table: when sys_signame[] is unavailable, derive
# NSIG and the SIG* name->number mapping from <signal.h> via the
# preprocessor, and write the result to signames.inc.
if test 0 = $HAVE_SYS_SIGNAME {
	if test 1 = $HAVE_CPP_DD {
		$e Generating list of signal names...
	} else {
		$e No list of signal names available via cpp. Falling back...
	}
	# Colon-delimited "seen" sets to de-duplicate names and numbers.
	setvar sigseenone = ':'
	setvar sigseentwo = ':'
	# Ask the preprocessor what cfg_NSIG expands to; the mksh_cfg=
	# marker makes the value findable in the cpp output.
	echo '#include <signal.h>
#if defined(NSIG_MAX)
#define cfg_NSIG NSIG_MAX
#elif defined(NSIG)
#define cfg_NSIG NSIG
#elif defined(_NSIG)
#define cfg_NSIG _NSIG
#elif defined(SIGMAX)
#define cfg_NSIG (SIGMAX + 1)
#elif defined(_SIGMAX)
#define cfg_NSIG (_SIGMAX + 1)
#else
/*XXX better error out, see sh.h */
#define cfg_NSIG 64
#endif
int
mksh_cfg= cfg_NSIG
;' >conftest.c
	# GNU sed 2.03 segfaults when optimising this to sed -n
	setvar NSIG = $(vq "$CPP $CFLAGS $CPPFLAGS $NOWARN conftest.c" | \
	    grep -v '^#' | \
	    sed '/mksh_cfg.*= *$/{
		N
		s/\n/ /
		}' | \
	    grep '^ *mksh_cfg *=' | \
	    sed 's/^ *mksh_cfg *=[	 ]*\([()0-9x+-][()0-9x+	 -]*\).*$/\1/)
	# NSIG may still be an arithmetic expression (e.g. "(SIGMAX + 1)");
	# evaluate it with awk; an unexpanded marker means failure (NSIG=0).
	case (NSIG) {
	*mksh_cfg* { $e "Error: NSIG='$NSIG'"; setvar NSIG = '0' }
	*[\ \(\)+-]* { setvar NSIG = $($AWK "BEGIN { print $NSIG }" ) }
	}
	# Use printf(1) for number normalisation if it works, else echo.
	setvar printf = 'printf'
	shell {printf hallo} >/dev/null 2>&1 || setvar printf = 'echo'
	test $printf = echo || test $(printf %d 42) = 42 || setvar printf = 'echo'
	test $printf = echo || setvar NSIG = $(printf %d $NSIG )
	$printf "NSIG=$NSIG ... "
	# Hardcoded candidate list, optionally extended with every SIG*
	# token found in the cpp -dD macro dump.
	setvar sigs = ""ABRT FPE ILL INT SEGV TERM ALRM BUS CHLD CONT HUP KILL PIPE QUIT""
	setvar sigs = ""$sigs STOP TSTP TTIN TTOU USR1 USR2 POLL PROF SYS TRAP URG VTALRM""
	setvar sigs = ""$sigs XCPU XFSZ INFO WINCH EMT IO DIL LOST PWR SAK CLD IOT STKFLT""
	setvar sigs = ""$sigs ABND DCE DUMP IOERR TRACE DANGER THCONT THSTOP RESV UNUSED""
	test 1 = $HAVE_CPP_DD && test $NSIG -gt 1 && setvar sigs = ""$sigs "$(vq \
	    "$CPP $CFLAGS $CPPFLAGS $NOWARN -dD conftest.c" | \
	    grep '[	 ]SIG[A-Z0-9][A-Z0-9]*[	 ]' | \
	    sed 's/^.*[	 ]SIG\([A-Z0-9][A-Z0-9]*\)[	 ].*$/\1/' | sort)"
	test $NSIG -gt 1 || setvar sigs = ''
	# Resolve each candidate SIGname to its number via the preprocessor;
	# emit ":<number> <name>" lines into the pipeline below.
	for name in $sigs {
		case (sigseenone) {
		*:$name:* { continue }
		}
		setvar sigseenone = "$sigseenone$name:"
		echo '#include <signal.h>' >conftest.c
		echo int >>conftest.c
		echo mksh_cfg= SIG$name >>conftest.c
		echo ';' >>conftest.c
		# GNU sed 2.03 croaks on optimising this, too
		vq "$CPP $CFLAGS $CPPFLAGS $NOWARN conftest.c" | \
		    grep -v '^#' | \
		    sed '/mksh_cfg.*= *$/{
			N
			s/\n/ /
			}' | \
		    grep '^ *mksh_cfg *=' | \
		    sed 's/^ *mksh_cfg *=[	 ]*\([0-9][0-9x]*\).*$/:\1 '$name/
	} | sed -n '/^:[^ ]/s/^://p' | while read nr name {
		# Keep only in-range signal numbers, first name wins per
		# number; C table rows go to signames.inc, progress to stderr.
		test $printf = echo || setvar nr = $(printf %d $nr )
		test $nr -gt 0 && test $nr -lt $NSIG || continue
		case (sigseentwo) {
		*:$nr:* { }
		* {	echo "		{ \"$name\", $nr },"
			setvar sigseentwo = "$sigseentwo$nr:"
			$printf "$name=$nr " >&2
			}
		}
	} 2>&1 >signames.inc
	rmf conftest.c
	$e done.
}

# Conditionally add sources and final flags, then start producing output.
addsrcs '!' HAVE_STRLCPY strlcpy.c
addsrcs USE_PRINTF_BUILTIN printf.c
test 1 = $USE_PRINTF_BUILTIN && add_cppflags -DMKSH_PRINTF_BUILTIN
test 1 = $HAVE_CAN_VERB && setvar CFLAGS = ""$CFLAGS -verbose""
add_cppflags -DMKSH_BUILD_R=523

$e $bi$me: Finished configuration testing, now producing output.$ao

setvar files = ''
setvar objs = ''
setvar sp = ''
# Pick the output binary name; Windows-style compilers produce .exe.
case (tcfn) {
a.exe|conftest.exe {
	setvar mkshexe = "$tfn.exe"
	add_cppflags -DMKSH_EXE_EXT
	}
* {
	setvar mkshexe = "$tfn"
	}
}
# Use a relative shebang when the build directory path contains spaces.
case (curdir) {
*\ * {	setvar mkshshebang = ""#!./$mkshexe"" }
* {	setvar mkshshebang = ""#!$curdir/$mkshexe"" }
}
# Generate the test.sh driver script. Inside the here-string, '$' escapes
# produce literal dollar signs in the output file, so $-references meant
# for test.sh itself survive; unescaped $vars are expanded now.
cat >test.sh <<< """
$mkshshebang
LC_ALL=C PATH='$PATH'; export LC_ALL PATH
test -n "'$'KSH_VERSION" || exit 1
set -A check_categories -- $check_categories
pflag='$curdir/$mkshexe'
sflag='$srcdir/check.t'
usee=0 Pflag=0 Sflag=0 uset=0 vflag=1 xflag=0
while getopts "C:e:fPp:QSs:t:v" ch; do case '$'ch {
(C)	check_categories['$'{#check_categories[*]}]='$'OPTARG ;;
(e)	usee=1; eflag='$'OPTARG ;;
(f)	check_categories['$'{#check_categories[*]}]=fastbox ;;
(P)	Pflag=1 ;;
(+P)	Pflag=0 ;;
(p)	pflag='$'OPTARG ;;
(Q)	vflag=0 ;;
(+Q)	vflag=1 ;;
(S)	Sflag=1 ;;
(+S)	Sflag=0 ;;
(s)	sflag='$'OPTARG ;;
(t)	uset=1; tflag='$'OPTARG ;;
(v)	vflag=1 ;;
(+v)	vflag=0 ;;
(*)	xflag=1 ;;
}
done
shift '$'((OPTIND - 1))
set -A args -- '$srcdir/check.pl' -p "'$'pflag"
x=
for y in "'$'{check_categories[@]}"; do
x='$'x,'$'y
done
if [[ -n '$'x ]]; then
args['$'{#args[*]}]=-C
args['$'{#args[*]}]='$'{x#,}
fi
if (( usee )); then
args['$'{#args[*]}]=-e
args['$'{#args[*]}]='$'eflag
fi
(( Pflag )) && args['$'{#args[*]}]=-P
if (( uset )); then
args['$'{#args[*]}]=-t
args['$'{#args[*]}]='$'tflag
fi
(( vflag )) && args['$'{#args[*]}]=-v
(( xflag )) && args['$'{#args[*]}]=-x	# force usage by synerr
if [[ -n '$'TMPDIR && -d '$'TMPDIR/. ]]; then
args['$'{#args[*]}]=-T
args['$'{#args[*]}]='$'TMPDIR
fi
print Testing mksh for conformance:
grep -F -e Mir''OS: -e MIRBSD "'$'sflag"
print "This shell is actually:'\'n'\'t'$'KSH_VERSION"
print 'test.sh built for mksh $dstversion'
cstr=''$'os = defined '$'^O ? '$'^O : "unknown";'
cstr="'$'cstr"'print '$'os . ", Perl version " . '$'];'
for perli in '$'PERL perl5 perl no; do
if [[ '$'perli = no ]]; then
print Cannot find a working Perl interpreter, aborting.
exit 1
fi
print "Trying Perl interpreter ''$'perli'..."
perlos='$'('$'perli -e "'$'cstr")
rv='$'?
print "Errorlevel '$'rv, running on ''$'perlos'"
if (( rv )); then
print "=> not using"
continue
fi
if [[ -n '$'perlos ]]; then
print "=> using it"
break
fi
done
(( Sflag )) || echo + '$'perli "'$'{args[@]}" -s "'$'sflag" "'$'@"
(( Sflag )) || exec '$'perli "'$'{args[@]}" -s "'$'sflag" "'$'@"$tsts
# use of the -S option for check.t split into multiple chunks
rv=0
for s in "'$'sflag".*; do
echo + '$'perli "'$'{args[@]}" -s "'$'s" "'$'@"
'$'perli "'$'{args[@]}" -s "'$'s" "'$'@"$tsts
rc='$'?
(( rv = rv ? rv : rc ))
done
exit '$'rv
"""
chmod 755 test.sh
# Pick the per-file compile flags by compiler mode: dragonegg and llvm
# emit LLVM bitcode/assembly for a later whole-program link, everything
# else just compiles to object files.
case (cm) {
dragonegg {
	setvar emitbc = ""-S -flto""
	}
llvm {
	setvar emitbc = ""-emit-llvm -c""
	}
* {
	setvar emitbc = '-c'
	}
}
# Generate Rebuild.sh: a flat script that re-runs genopt on every .opt
# file, recompiles every source, and relinks, without redoing the
# configuration tests.
echo ": # work around NeXTstep bug" >Rebuild.sh
cd $srcdir
setvar optfiles = $(echo *.opt)
cd $curdir
for file in $optfiles {
	echo "echo + Running genopt on '$file'..."
	echo "(srcfile='$srcdir/$file'; BUILDSH_RUN_GENOPT=1; . '$srcdir/Build.sh')"
} >>Rebuild.sh
echo set -x >>Rebuild.sh
for file in $SRCS {
	# op = source basename with .c stripped, used to name outputs.
	setvar op = $(echo x"$file" | sed 's/^x\(.*\)\.c$/\1./)
	test -f $file || setvar file = "$srcdir/$file"
	setvar files = ""$files$sp$file""
	setvar sp = '' ''
	echo "$CC $CFLAGS $CPPFLAGS $emitbc $file || exit 1" >>Rebuild.sh
	if test $cm = dragonegg {
		echo "mv ${op}s ${op}ll" >>Rebuild.sh
		echo "llvm-as ${op}ll || exit 1" >>Rebuild.sh
		setvar objs = ""$objs$sp${op}bc""
	} else {
		setvar objs = ""$objs$sp${op}o""
	}
}
# For the LLVM modes, link all bitcode into one optimised .s first.
case (cm) {
dragonegg|llvm {
	echo "rm -f $tfn.s" >>Rebuild.sh
	echo "llvm-link -o - $objs | opt $optflags | llc -o $tfn.s" >>Rebuild.sh
	setvar lobjs = "$tfn.s"
	}
* {
	setvar lobjs = "$objs"
	}
}
echo tcfn=$mkshexe >>Rebuild.sh
echo "$CC $CFLAGS $LDFLAGS -o \$tcfn $lobjs $LIBS $ccpr" >>Rebuild.sh
echo "test -f \$tcfn || exit 1; $SIZE \$tcfn" >>Rebuild.sh
# Makefile mode: instead of building, emit a Makefrag.inc fragment with
# the discovered sources/objects/flags and exit successfully.
if test $cm = makefile {
	setvar extras = ''emacsfn.h rlimits.opt sh.h sh_flags.opt var_spec.h''
	test 0 = $HAVE_SYS_SIGNAME && setvar extras = ""$extras signames.inc""
	# gens: list of generated .gen targets; genq: their make rules.
	setvar gens = '', genq = ''
	for file in $optfiles {
		setvar genf = $(basename $file | sed 's/.opt$/.gen/)
		setvar gens = ""$gens $genf""
		setvar genq = ""$genq$nl$genf: $srcdir/Build.sh $srcdir/$file
			srcfile=$srcdir/$file; BUILDSH_RUN_GENOPT=1; . $srcdir/Build.sh""
	}
	cat >Makefrag.inc <<< """
# Makefile fragment for building mksh $dstversion

PROG=		$mkshexe
MAN=		mksh.1
SRCS=		$SRCS
SRCS_FP=	$files
OBJS_BP=	$objs
INDSRCS=	$extras
NONSRCS_INST=	dot.mkshrc '$'(MAN)
NONSRCS_NOINST=	Build.sh Makefile Rebuild.sh check.pl check.t test.sh
CC=		$CC
CFLAGS=		$CFLAGS
CPPFLAGS=	$CPPFLAGS
LDFLAGS=	$LDFLAGS
LIBS=		$LIBS

.depend '$'(OBJS_BP):$gens$genq

# not BSD make only:
#VPATH=		$srcdir
#all: '$'(PROG)
#'$'(PROG): '$'(OBJS_BP)
#	'$'(CC) '$'(CFLAGS) '$'(LDFLAGS) -o '$'@ '$'(OBJS_BP) '$'(LIBS)
#'$'(OBJS_BP): '$'(SRCS_FP) '$'(NONSRCS)
#.c.o:
#	'$'(CC) '$'(CFLAGS) '$'(CPPFLAGS) -c '$'<

# for all make variants:
#REGRESS_FLAGS=	-f
#regress:
#	./test.sh '$'(REGRESS_FLAGS)
check_categories=$check_categories

# for BSD make only:
#.PATH: $srcdir
#.include <bsd.prog.mk>
"""
	$e
	$e Generated Makefrag.inc successfully.
	exit 0
}
# Direct-build path: run genopt, compile every source (combined, in
# parallel, or sequentially depending on mode), link, and print
# installation instructions.
for file in $optfiles {
	$e "+ Running genopt on '$file'..."
	do_genopt "$srcdir/$file" || exit 1
}
if test $cm = combine {
	# Single compiler invocation over all sources at once.
	setvar objs = ""-o $mkshexe""
	for file in $SRCS {
		test -f $file || setvar file = "$srcdir/$file"
		setvar objs = ""$objs $file""
	}
	setvar emitbc = ""-fwhole-program --combine""
	v "$CC $CFLAGS $CPPFLAGS $LDFLAGS $emitbc $objs $LIBS $ccpr"
} elif test 1 = $pm {
	# Parallel mode: background each compile, then wait for all.
	for file in $SRCS {
		test -f $file || setvar file = "$srcdir/$file"
		v "$CC $CFLAGS $CPPFLAGS $emitbc $file" &
	}
	wait
} else {
	# Sequential compile; dragonegg needs the .s -> .ll -> .bc shuffle.
	for file in $SRCS {
		test $cm = dragonegg && \
		    setvar op = $(echo x"$file" | sed 's/^x\(.*\)\.c$/\1./)
		test -f $file || setvar file = "$srcdir/$file"
		v "$CC $CFLAGS $CPPFLAGS $emitbc $file" || exit 1
		if test $cm = dragonegg {
			v "mv ${op}s ${op}ll"
			v "llvm-as ${op}ll" || exit 1
		}
	}
}
case (cm) {
dragonegg|llvm {
	rmf $tfn.s
	v "llvm-link -o - $objs | opt $optflags | llc -o $tfn.s"
	}
}
setvar tcfn = "$mkshexe"
test $cm = combine || v "$CC $CFLAGS $LDFLAGS -o $tcfn $lobjs $LIBS $ccpr"
test -f $tcfn || exit 1
# Unless -r was given, format a catman page; drop it if nroff fails.
test 1 = $r || v "$NROFF -mdoc <'$srcdir/mksh.1' >$tfn.cat1" || \
    rmf $tfn.cat1
test 0 = $eq && v $SIZE $tcfn
setvar i = 'install'
test -f /usr/ucb/$i && setvar i = "/usr/ucb/$i"
# In quiet mode, neuter $e (the echo helper) for the hints below.
test 1 = $eq && setvar e = ':'
$e
$e Installing the shell:
$e "# $i -c -s -o root -g bin -m 555 $tfn /bin/$tfn"
if test $legacy = 0 {
	$e "# grep -x /bin/$tfn /etc/shells >/dev/null || echo /bin/$tfn >>/etc/shells"
	$e "# $i -c -o root -g bin -m 444 dot.mkshrc /usr/share/doc/mksh/examples/"
}
$e
$e Installing the manual:
if test -f $tfn.cat1 {
	$e "# $i -c -o root -g bin -m 444 $tfn.cat1" \
	    "/usr/share/man/cat1/$tfn.0"
	$e or
}
$e "# $i -c -o root -g bin -m 444 $tfn.1 /usr/share/man/man1/$tfn.1"
$e
$e Run the regression test suite: ./test.sh
$e Please also read the sample file dot.mkshrc and the fine manual.
exit 0

# The remainder of this script is reference documentation delivered as a
# here-string to the ':' no-op builtin; it is never executed and must
# stay byte-identical (it is a string literal, not shell code).
: <<< '''

=== Environment used ===

==== build environment ====
AWK				default: awk
CC				default: cc
CFLAGS				if empty, defaults to -xO2 or +O2
				or -O3 -qstrict or -O2, per compiler
CPPFLAGS			default empty
LDFLAGS				default empty; added before sources
LDSTATIC			set this to '-static'; default unset
LIBS				default empty; added after sources
				[Interix] default: -lcrypt (XXX still needed?)
NOWARN				-Wno-error or similar
NROFF				default: nroff
TARGET_OS			default: $(uname -s || uname)
TARGET_OSREV			[QNX] default: $(uname -r)

==== feature selectors ====
USE_PRINTF_BUILTIN		1 to include (unsupported) printf(1) as builtin
===== general format =====
HAVE_STRLEN			ac_test
HAVE_STRING_H			ac_header
HAVE_CAN_FSTACKPROTECTORALL	ac_flags

==== cpp definitions ====
DEBUG				dont use in production, wants gcc, implies:
DEBUG_LEAKS			enable freeing resources before exiting
MKSHRC_PATH			"~/.mkshrc" (do not change)
MKSH_A4PB			force use of arc4random_pushb
MKSH_ASSUME_UTF8		(0=disabled, 1=enabled; default: unset)
MKSH_BINSHPOSIX			if */sh or */-sh, enable set -o posix
MKSH_BINSHREDUCED		if */sh or */-sh, enable set -o sh
MKSH_CLRTOEOL_STRING		"\033[K"
MKSH_CLS_STRING			"\033[;H\033[J"
MKSH_CONSERVATIVE_FDS		fd 0-9 for scripts, shell only up to 31
MKSH_DEFAULT_EXECSHELL		"/bin/sh" (do not change)
MKSH_DEFAULT_PROFILEDIR		"/etc" (do not change)
MKSH_DEFAULT_TMPDIR		"/tmp" (do not change)
MKSH_DISABLE_DEPRECATED		disable code paths scheduled for later removal
MKSH_DISABLE_EXPERIMENTAL	disable code not yet comfy for (LTS) snapshots
MKSH_DISABLE_TTY_WARNING	shut up warning about ctty if OS cant be fixed
MKSH_DONT_EMIT_IDSTRING		omit RCS IDs from binary
MKSH_MIDNIGHTBSD01ASH_COMPAT	set -o sh: additional compatibility quirk
MKSH_NOPROSPECTOFWORK		disable jobs, co-processes, etc. (do not use)
MKSH_NOPWNAM			skip PAM calls, for -static on glibc or Solaris
MKSH_NO_CMDLINE_EDITING		disable command line editing code entirely
MKSH_NO_DEPRECATED_WARNING	omit warning when deprecated stuff is run
MKSH_NO_LIMITS			omit ulimit code
MKSH_NO_SIGSETJMP		define if sigsetjmp is broken or not available
MKSH_NO_SIGSUSPEND		use sigprocmask+pause instead of sigsuspend
MKSH_SMALL			omit some code, optimise hard for size (slower)
MKSH_SMALL_BUT_FAST		disable some hard-for-size optim. (modern sys.)
MKSH_S_NOVI=1			disable Vi editing mode (default if MKSH_SMALL)
MKSH_TYPEDEF_SIG_ATOMIC_T	define to e.g. 'int' if sig_atomic_t is missing
MKSH_TYPEDEF_SSIZE_T		define to e.g. 'long' if your OS has no ssize_t
MKSH_UNEMPLOYED			disable job control (but not jobs/co-processes)

=== generic installation instructions ===

Set CC and possibly CFLAGS, CPPFLAGS, LDFLAGS, LIBS. If cross-compiling,
also set TARGET_OS. To disable tests, set e.g. HAVE_STRLCPY=0; to enable
them, set to a value other than 0 or 1. Ensure /bin/ed is installed. For
MKSH_SMALL but with Vi mode, add -DMKSH_S_NOVI=0 to CPPFLAGS as well.

Normally, the following command is what you want to run, then:
$ (sh Build.sh -r -c lto && ./test.sh -f) 2>&1 | tee log

Copy dot.mkshrc to /etc/skel/.mkshrc; install mksh into $prefix/bin; or
/bin; install the manpage, if omitting the -r flag a catmanpage is made
using $NROFF. Consider using a forward script as /etc/skel/.mkshrc like
http://anonscm.debian.org/cgit/collab-maint/mksh.git/plain/debian/.mkshrc
and put dot.mkshrc as /etc/mkshrc so users need not keep up their HOME.

You may also want to install the lksh binary (also as /bin/sh) built by:
$ CPPFLAGS="$CPPFLAGS -DMKSH_BINSHPOSIX" sh Build.sh -L -r -c lto

'''
    (DONE benchmarks/testdata/Build.sh)
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This contains util code for testing kubectl.

set -o errexit
set -o nounset
set -o pipefail

# Set locale to ensure english responses from kubectl commands
export LANG=C

setvar KUBE_ROOT = "$(dirname "${BASH_SOURCE}")/../.."
# Expects the following has already been done by whatever sources this script
# source "${KUBE_ROOT}/hack/lib/init.sh"
# source "${KUBE_ROOT}/hack/lib/test.sh"

# Host/port defaults for the local test control plane; each may be
# overridden from the environment.
setvar ETCD_HOST = ${ETCD_HOST:-127.0.0.1}
setvar ETCD_PORT = ${ETCD_PORT:-2379}
setvar API_PORT = ${API_PORT:-8080}
setvar SECURE_API_PORT = ${SECURE_API_PORT:-6443}
setvar API_HOST = ${API_HOST:-127.0.0.1}
setvar KUBELET_HEALTHZ_PORT = ${KUBELET_HEALTHZ_PORT:-10248}
setvar CTLRMGR_PORT = ${CTLRMGR_PORT:-10252}
setvar PROXY_HOST = '127.0.0.1' # kubectl only serves on localhost.

# Container images referenced by the test fixtures below.
setvar IMAGE_NGINX = ""gcr.io/google-containers/nginx:1.7.9""
setvar IMAGE_DEPLOYMENT_R1 = ""gcr.io/google-containers/nginx:test-cmd""  # deployment-revision1.yaml
setvar IMAGE_DEPLOYMENT_R2 = "$IMAGE_NGINX"  # deployment-revision2.yaml
setvar IMAGE_PERL = ""gcr.io/google-containers/perl""
setvar IMAGE_PAUSE_V2 = ""gcr.io/google-containers/pause:2.0""
setvar IMAGE_DAEMONSET_R2 = ""gcr.io/google-containers/pause:latest""
setvar IMAGE_DAEMONSET_R2_2 = ""gcr.io/google-containers/nginx:test-cmd""  # rollingupdate-daemonset-rv2.yaml
setvar IMAGE_STATEFULSET_R1 = ""gcr.io/google_containers/nginx-slim:0.7""
setvar IMAGE_STATEFULSET_R2 = ""gcr.io/google_containers/nginx-slim:0.8""

# Expose kubectl directly for readability
setvar PATH = ""${KUBE_OUTPUT_HOSTBIN}":$PATH"

# Define variables for resource types to prevent typos.
setvar clusterroles = ""clusterroles""
setvar configmaps = ""configmaps""
setvar csr = ""csr""
setvar deployments = ""deployments""
setvar horizontalpodautoscalers = ""horizontalpodautoscalers""
setvar metrics = ""metrics""
setvar namespaces = ""namespaces""
setvar nodes = ""nodes""
setvar persistentvolumeclaims = ""persistentvolumeclaims""
setvar persistentvolumes = ""persistentvolumes""
setvar pods = ""pods""
setvar podtemplates = ""podtemplates""
setvar replicasets = ""replicasets""
setvar replicationcontrollers = ""replicationcontrollers""
setvar roles = ""roles""
setvar secrets = ""secrets""
setvar serviceaccounts = ""serviceaccounts""
setvar services = ""services""
setvar statefulsets = ""statefulsets""
setvar static = ""static""
setvar storageclass = ""storageclass""
setvar subjectaccessreviews = ""subjectaccessreviews""
setvar selfsubjectaccessreviews = ""selfsubjectaccessreviews""
setvar customresourcedefinitions = ""customresourcedefinitions""
setvar daemonsets = ""daemonsets""
setvar controllerrevisions = ""controllerrevisions""


# include shell2junit library
# sh2ju.sh provides juLog, used by record_command below; missing it is fatal.
setvar sh2ju = ""${KUBE_ROOT}/third_party/forked/shell2junit/sh2ju.sh""
if [[ -f "${sh2ju}" ]] {
  source ${sh2ju}
} else {
  echo "failed to find third_party/forked/shell2junit/sh2ju.sh"
  exit 1
}

# record_command runs the command and records its output/error messages in junit format
# it expects the first to be the name of the command
# Example:
# record_command run_kubectl_tests
#
# WARNING: Variable changes in the command will NOT be effective after record_command returns.
#          This is because the command runs in subshell.
proc record_command {
    # Relax strict modes so a failing test command is recorded rather than
    # aborting the whole run; they are restored before returning.
    set +o nounset
    set +o errexit

    local name="$1"
    # Directory for the junit XML report; defaults to /tmp/junit-results.
    local output="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
    echo "Recording: ${name}"
    echo "Running command: $[join(ARGV)]"
    # juLog (from sh2ju.sh, sourced above) runs the args and writes the report.
    juLog -output="${output}" -class="test-cmd" -name="${name}" @ARGV
    # Must immediately follow juLog: $? is juLog's exit status.
    if [[ $? -ne 0 ]] {
      echo "Error when running ${name}"
      # NOTE(review): foundError looks like a global flag consumed by the
      # harness elsewhere — confirm against the caller.
      setvar foundError = ""True""
    }

    set -o nounset
    set -o errexit
}

# Stops the running kubectl proxy, if there is one.
proc stop-proxy {
  [[ -n "${PROXY_PORT-}" ]] && kube::log::status "Stopping proxy on port ${PROXY_PORT}"
  [[ -n "${PROXY_PID-}" ]] && kill ${PROXY_PID} 1>&2 2>/dev/null
  [[ -n "${PROXY_PORT_FILE-}" ]] && rm -f ${PROXY_PORT_FILE}
  setvar PROXY_PID = ''
  setvar PROXY_PORT = ''
  setvar PROXY_PORT_FILE = ''
}

# Starts "kubectl proxy" to test the client proxy. $1 (optional): api_prefix.
# On success sets PROXY_PID and PROXY_PORT; exits via kube::log::error_exit
# if the proxy never publishes its port.
proc start-proxy {
  stop-proxy

  setvar PROXY_PORT_FILE = $(mktemp proxy-port.out.XXXXX)
  kube::log::status "Starting kubectl proxy on random port; output file in ${PROXY_PORT_FILE}; args: ${1-}"

  # --port=0 picks a random free port; the proxy prints the chosen port to
  # its output file, which is polled below.
  if test $Argc -eq 0 {
    kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 &
  } else {
    kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 &
  }
  # BUG FIX: was 'setvar PROXY_PID = ""$!' — unbalanced quotes.
  setvar PROXY_PID = "$!"
  setvar PROXY_PORT = ''

  # BUG FIX: the retry body below had lost its loop header during translation
  # ('local attempts=0 {'); poll the output file until the port appears,
  # giving up after 10 attempts (~5s).
  local attempts=0
  while test -z "${PROXY_PORT}" {
    if (( ${attempts} > 9 )) {
      kill ${PROXY_PID}
      kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})"
    }
    sleep .5
    kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..."
    setvar PROXY_PORT = $(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE})
    setvar attempts = $((attempts+1))
  }

  kube::log::status "kubectl proxy running on port ${PROXY_PORT}"

  # We try checking kubectl proxy 30 times with 1s delays to avoid occasional
  # failures.
  if test $Argc -eq 0 {
    kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy"
  } else {
    kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/$1/healthz" "kubectl proxy --api-prefix=$1"
  }
}

proc cleanup {
  [[ -n "${APISERVER_PID-}" ]] && kill ${APISERVER_PID} 1>&2 2>/dev/null
  [[ -n "${CTLRMGR_PID-}" ]] && kill ${CTLRMGR_PID} 1>&2 2>/dev/null
  [[ -n "${KUBELET_PID-}" ]] && kill ${KUBELET_PID} 1>&2 2>/dev/null
  stop-proxy

  kube::etcd::cleanup
  rm -rf ${KUBE_TEMP}

  local junit_dir="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
  echo "junit report dir:" ${junit_dir}

  kube::log::status "Clean up complete"
}

# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# return code. Prints a helpful message on failure.
proc check-curl-proxy-code {
  local status
  local -r address=$1
  local -r desired=$2
  local -r full_address="${PROXY_HOST}:${PROXY_PORT}${address}"
  # -w "%{http_code}" makes curl emit only the HTTP status code.
  setvar status = $(curl -w "%{http_code}" --silent --output /dev/null "${full_address}")
  if test ${status} != ${desired} {
    echo "For address ${full_address}, got ${status} but wanted ${desired}"
    return 1
  }
  return 0
}

# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333.
# Runs kubectl with the given args, retrying on optimistic-concurrency
# conflicts ("the object has been modified") up to 4 times with exponential
# backoff (1s, 2s, 4s, 8s). Set PRESERVE_ERR_FILE=true to keep the last
# stderr capture in ${KUBE_TEMP}/kubectl-error.
proc kubectl-with-retry {
  setvar ERROR_FILE = ""${KUBE_TEMP}/kubectl-error""
  setvar preserve_err_file = ${PRESERVE_ERR_FILE-false}
  for count in {0..3} {
    # '|| true' keeps errexit from aborting on a retryable failure.
    kubectl @ARGV 2> ${ERROR_FILE} || true
    if grep -q "the object has been modified" ${ERROR_FILE} {
      kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
      rm ${ERROR_FILE}
      sleep $((2**count))
    } else {
      # Success or a non-retryable error: drop the capture unless the
      # caller asked to preserve it.
      if test $preserve_err_file != true  {
        rm ${ERROR_FILE}
      }
      break
    }
  }
}

# Waits for the pods with the given label to match the list of names. Don't call
# this function unless you know the exact pod names, or expect no pods.
# $1: label to match
# $2: list of pod names sorted by name
# Example invocation:
# wait-for-pods-with-label "app=foo" "nginx-0nginx-1"
proc wait-for-pods-with-label {
  local i
  # Poll up to 10 times with linearly increasing sleeps (1s..10s).
  for i in $(seq 1 10) {
    # Template concatenates pod names with no separator, sorted by name,
    # which is why $2 must be the names joined back-to-back.
    setvar kubeout = $(kubectl get po -l $1 --template '{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name ${kube_flags[@]})
    if [[ $kubeout = $2 ]] {
        return
    }
    echo Waiting for pods: $2, found $kubeout
    sleep $i
  }
  kube::log::error_exit "Timeout waiting for pods with label $1"
}

# Code to be run before running the tests.
# Starts etcd, builds kubectl, and points kubectl at a dedicated "test"
# context. Registers cleanup on EXIT/SIGINT.
proc setup {
  kube::util::trap_add cleanup EXIT SIGINT
  kube::util::ensure-temp-dir
  # ensure ~/.kube/config isn't loaded by tests
  setvar HOME = "${KUBE_TEMP}"

  kube::etcd::start

  # Find a standard sed instance for use with edit scripts
  setvar SED = 'sed'
  # Prefer gsed when available (GNU sed on macOS via homebrew).
  if which gsed &>/dev/null {
    setvar SED = 'gsed'
  }
  # GNU sed is required: the edit-script tests below use "$SED -i".
  if ! shell {$SED --version 2>&1 | grep -q GNU} {
    echo "!!! GNU sed is required.  If on OS X, use 'brew install gnu-sed'."
    exit 1
  }

  kube::log::status "Building kubectl"
  make -C ${KUBE_ROOT} WHAT="cmd/kubectl"

  # Check kubectl
  kube::log::status "Running kubectl with no options"
  "${KUBE_OUTPUT_HOSTBIN}/kubectl"

  # TODO: we need to note down the current default namespace and set back to this
  # namespace after the tests are done.
  kubectl config view
  setvar CONTEXT = ""test""
  kubectl config set-context ${CONTEXT}
  kubectl config use-context ${CONTEXT}

  kube::log::status "Setup complete"
}

########################################################
# Kubectl version (--short, --client, --output) #
########################################################
# Verifies that every output mode of `kubectl version` (--client, --short,
# -o json, -o yaml) reports consistent client/server version info, by
# dumping each variant to a file and diffing the files against a baseline.
proc run_kubectl_version_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl version"
  setvar TEMP = "${KUBE_TEMP}"

  kubectl get ${kube_flags[@]} --raw /version

  # create version files, one for the client, one for the server.
  # these are the files we will use to ensure that the remainder output is correct
  kube::test::version::object_to_file "Client" "" "${TEMP}/client_version_test"
  kube::test::version::object_to_file "Server" "" "${TEMP}/server_version_test"

  kube::log::status "Testing kubectl version: check client only output matches expected output"
  kube::test::version::object_to_file "Client" "--client" "${TEMP}/client_only_version_test"
  kube::test::version::object_to_file "Client" "--client" "${TEMP}/server_client_only_version_test"
  kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_version_test" "the flag '--client' shows correct client info"
  kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_version_test" "the flag '--client' correctly has no server version info"

  kube::log::status "Testing kubectl version: verify json output"
  kube::test::version::json_client_server_object_to_file "" "clientVersion" "${TEMP}/client_json_version_test"
  kube::test::version::json_client_server_object_to_file "" "serverVersion" "${TEMP}/server_json_version_test"
  kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_json_version_test" "--output json has correct client info"
  kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_json_version_test" "--output json has correct server info"

  kube::log::status "Testing kubectl version: verify json output using additional --client flag does not contain serverVersion"
  kube::test::version::json_client_server_object_to_file "--client" "clientVersion" "${TEMP}/client_only_json_version_test"
  kube::test::version::json_client_server_object_to_file "--client" "serverVersion" "${TEMP}/server_client_only_json_version_test"
  kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_json_version_test" "--client --output json has correct client info"
  kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_json_version_test" "--client --output json has no server info"

  kube::log::status "Testing kubectl version: compare json output using additional --short flag"
  kube::test::version::json_client_server_object_to_file "--short" "clientVersion" "${TEMP}/client_short_json_version_test"
  kube::test::version::json_client_server_object_to_file "--short" "serverVersion" "${TEMP}/server_short_json_version_test"
  kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_short_json_version_test" "--short --output client json info is equal to non short result"
  kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_short_json_version_test" "--short --output server json info is equal to non short result"

  kube::log::status "Testing kubectl version: compare json output with yaml output"
  kube::test::version::json_object_to_file "" "${TEMP}/client_server_json_version_test"
  kube::test::version::yaml_object_to_file "" "${TEMP}/client_server_yaml_version_test"
  kube::test::version::diff_assert "${TEMP}/client_server_json_version_test" "eq" "${TEMP}/client_server_yaml_version_test" "--output json/yaml has identical information"

  set +o nounset
  set +o errexit
}

# Runs all pod related tests.
proc run_pod_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl(v1:pods)"

  ### Create POD valid-pod from JSON
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create ${kube_flags[@]} -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  # Post-condition: valid-pod POD is created
  kubectl get ${kube_flags[@]} pods -o json
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
  kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
  kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
  # Repeat above test using jsonpath template
  kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod'
  kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod'
  # Describe command should print detailed information
  kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image:" "Node:" "Labels:" "Status:"
  # Describe command should print events information by default
  kube::test::describe_object_events_assert pods 'valid-pod'
  # Describe command should not print events information when show-events=false
  kube::test::describe_object_events_assert pods 'valid-pod' false
  # Describe command should print events information when show-events=true
  kube::test::describe_object_events_assert pods 'valid-pod' true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:"

  # Describe command should print events information by default
  kube::test::describe_resource_events_assert pods
  # Describe command should not print events information when show-events=false
  kube::test::describe_resource_events_assert pods false
  # Describe command should print events information when show-events=true
  kube::test::describe_resource_events_assert pods true
  ### Validate Export ###
  kube::test::get_object_assert 'pods/valid-pod' "{{.metadata.namespace}} {{.metadata.name}}" '<no value> valid-pod' "--export=true"

  ### Dump current valid-pod POD
  setvar output_pod = $(kubectl get pod valid-pod -o yaml --output-version=v1 "${kube_flags[@]}")

  ### Delete POD valid-pod by id
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pod valid-pod ${kube_flags[@]} --grace-period=0 --force
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Delete POD valid-pod by id with --now
  # Pre-condition: valid-pod POD exists
  kubectl create ${kube_flags[@]} -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pod valid-pod ${kube_flags[@]} --now
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Delete POD valid-pod by id with --grace-period=0
  # Pre-condition: valid-pod POD exists
  kubectl create ${kube_flags[@]} -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command succeeds without --force by waiting
  kubectl delete pod valid-pod ${kube_flags[@]} --grace-period=0
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create POD valid-pod from dumped YAML
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  echo ${output_pod} | $SED '/namespace:/d' | kubectl create -f - ${kube_flags[@]}
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete POD valid-pod from JSON
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml ${kube_flags[@]} --grace-period=0 --force
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create POD valid-pod from JSON
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml ${kube_flags[@]}
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete POD valid-pod with label
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:'
  # Command
  kubectl delete pods -l'name in (valid-pod)' ${kube_flags[@]} --grace-period=0 --force
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''

  ### Create POD valid-pod from YAML
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml ${kube_flags[@]}
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete PODs with no parameter mustn't kill everything
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  ! kubectl delete pods ${kube_flags[@]}
  # Post-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete PODs with --all and a label selector is not permitted
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  ! kubectl delete --all pods -l'name in (valid-pod)' ${kube_flags[@]}
  # Post-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Delete all PODs
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete --all pods ${kube_flags[@]} --grace-period=0 --force # --all remove all the pods
  # Post-condition: no POD exists
  kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''

  # Detailed tests for describe pod output
    ### Create a new namespace
  # Pre-condition: the test-secrets namespace does not exist
  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:' ':'
  # Command
  kubectl create namespace test-kubectl-describe-pod
  # Post-condition: namespace 'test-secrets' is created.
  kube::test::get_object_assert 'namespaces/test-kubectl-describe-pod' "{{$id_field}}" 'test-kubectl-describe-pod'

  ### Create a generic secret
  # Pre-condition: no SECRET exists
  kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
  # Post-condition: secret exists and has expected values
  kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-secret'
  kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$secret_type}}" 'test-type'

  ### Create a generic configmap
  # Pre-condition: no CONFIGMAP exists
  kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create configmap test-configmap --from-literal=key-2=value2 --namespace=test-kubectl-describe-pod
  # Post-condition: configmap exists and has expected values
  kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap'

  ### Create a pod disruption budget with minAvailable
  # Command
  kubectl create pdb test-pdb-1 --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-1 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '2'
  # Command
  kubectl create pdb test-pdb-2 --selector=app=rails --min-available=50% --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-2 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '50%'

  ### Create a pod disruption budget with maxUnavailable
  # Command
  kubectl create pdb test-pdb-3 --selector=app=rails --max-unavailable=2 --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-3 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '2'
  # Command
  kubectl create pdb test-pdb-4 --selector=app=rails --max-unavailable=50% --namespace=test-kubectl-describe-pod
  # Post-condition: pdb exists and has expected values
  kube::test::get_object_assert 'pdb/test-pdb-4 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '50%'

  ### Fail creating a pod disruption budget if both maxUnavailable and minAvailable specified
  ! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod

  # Create a pod that consumes secret, configmap, and downward API keys as envs
  kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
  kubectl create -f hack/testdata/pod-with-api-env.yaml --namespace=test-kubectl-describe-pod

  kube::test::describe_object_assert 'pods --namespace=test-kubectl-describe-pod' 'env-test-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
  # Describe command (resource only) should print detailed information about environment variables
  kube::test::describe_resource_assert 'pods --namespace=test-kubectl-describe-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"

  # Clean-up
  kubectl delete pod env-test-pod --namespace=test-kubectl-describe-pod
  kubectl delete secret test-secret --namespace=test-kubectl-describe-pod
  kubectl delete configmap test-configmap --namespace=test-kubectl-describe-pod
  kubectl delete pdb/test-pdb-1 pdb/test-pdb-2 pdb/test-pdb-3 pdb/test-pdb-4 --namespace=test-kubectl-describe-pod
  kubectl delete namespace test-kubectl-describe-pod

  ### Create two PODs
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml ${kube_flags[@]}
  kubectl create -f examples/storage/redis/redis-master.yaml ${kube_flags[@]}
  # Post-condition: valid-pod and redis-master PODs are created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'

  ### Delete multiple PODs at once
  # Pre-condition: valid-pod and redis-master PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
  # Command
  kubectl delete pods valid-pod redis-master ${kube_flags[@]} --grace-period=0 --force # delete multiple pods at once
  # Post-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create valid-pod POD
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml ${kube_flags[@]}
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Label the valid-pod POD
  # Pre-condition: valid-pod is not labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:'
  # Command
  kubectl label pods valid-pod new-name=new-valid-pod ${kube_flags[@]}
  # Post-condition: valid-pod is labelled
  kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'

  ### Label the valid-pod POD with empty label value
  # Pre-condition: valid-pod does not have label "emptylabel"
  kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
  # Command
  kubectl label pods valid-pod emptylabel="" ${kube_flags[@]}
  # Post-condition: valid pod contains "emptylabel" with no value
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.emptylabel}}" ''

  ### Annotate the valid-pod POD with empty annotation value
  # Pre-condition: valid-pod does not have annotation "emptyannotation"
  kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" '<no value>'
  # Command
  kubectl annotate pods valid-pod emptyannotation="" ${kube_flags[@]}
  # Post-condition: valid pod contains "emptyannotation" with no value
  kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" ''

  ### Record label change
  # Pre-condition: valid-pod does not have record annotation
  kube::test::get_object_assert 'pod valid-pod' "{{range.items}}{{$annotations_field}}:{{end}}" ''
  # Command
  kubectl label pods valid-pod record-change=true --record=true ${kube_flags[@]}
  # Post-condition: valid-pod has record annotation
  kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"

  ### Do not record label change
  # Command
  kubectl label pods valid-pod no-record-change=true --record=false ${kube_flags[@]}
  # Post-condition: valid-pod's record annotation still contains command with --record=true
  kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"

  ### Record label change with unspecified flag and previous change already recorded
  # Command
  kubectl label pods valid-pod new-record-change=true ${kube_flags[@]}
  # Post-condition: valid-pod's record annotation contains new change
  kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*new-record-change=true.*"


  ### Delete POD by label
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 --force ${kube_flags[@]}
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create pod-with-precision POD
  # Pre-condition: no POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/pod-with-precision.json ${kube_flags[@]}
  # Post-condition: valid-pod POD is running
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'pod-with-precision:'

  ## Patch preserves precision
  # Command
  kubectl patch ${kube_flags[@]} pod pod-with-precision -p='{"metadata":{"annotations":{"patchkey": "patchvalue"}}}'
  # Post-condition: pod-with-precision POD has patched annotation
  kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.patchkey}}" 'patchvalue'
  # Command
  kubectl label pods pod-with-precision labelkey=labelvalue ${kube_flags[@]}
  # Post-condition: pod-with-precision POD has label
  kube::test::get_object_assert 'pod pod-with-precision' "{{${labels_field}.labelkey}}" 'labelvalue'
  # Command
  kubectl annotate pods pod-with-precision annotatekey=annotatevalue ${kube_flags[@]}
  # Post-condition: pod-with-precision POD has annotation
  kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
  # Cleanup
  kubectl delete pod pod-with-precision ${kube_flags[@]}

  ### Annotate POD YAML file locally without effecting the live pod.
  kubectl create -f hack/testdata/pod.yaml ${kube_flags[@]}
  # Command
  kubectl annotate -f hack/testdata/pod.yaml annotatekey=annotatevalue ${kube_flags[@]}

  # Pre-condition: annotationkey is annotationvalue
  kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'

  # Command
  setvar output_message = $(kubectl annotate --local -f hack/testdata/pod.yaml annotatekey=localvalue -o yaml "${kube_flags[@]}")
  echo $output_message

  # Post-condition: annotationkey is still annotationvalue in the live pod, but command output is the new value
  kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
  kube::test::if_has_string ${output_message} "localvalue"

  # Cleanup
  kubectl delete -f hack/testdata/pod.yaml ${kube_flags[@]}

  ### Create valid-pod POD
  # Pre-condition: no services and no rcs exist
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  ## kubectl create --edit can update the label filed of multiple resources. tmp-editor.sh is a fake editor
  setvar TEMP = $(mktemp /tmp/tmp-editor-XXXXXXXX.sh)
  echo -e "#!/bin/bash\n$SED -i \"s/mock/modified/g\" \$1" > ${TEMP}
  chmod +x ${TEMP}
  # Command
  EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-json.json ${kube_flags[@]}
  # Post-condition: service named modified and rc named modified are created
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  # Clean up
  kubectl delete service/modified ${kube_flags[@]}
  kubectl delete rc/modified ${kube_flags[@]}

  # Pre-condition: no services and no rcs exist
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-list.json ${kube_flags[@]}
  # Post-condition: service named modified and rc named modified are created
  kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
  # Clean up
  rm ${TEMP}
  kubectl delete service/modified ${kube_flags[@]}
  kubectl delete rc/modified ${kube_flags[@]}

  ## kubectl create --edit won't create anything if user makes no changes
  test $(EDITOR=cat kubectl create --edit -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json 2>&1 | grep 'Edit cancelled')

  ## Create valid-pod POD
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml ${kube_flags[@]}
  # Post-condition: valid-pod POD is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ## Patch can modify a local object
  kubectl patch --local -f pkg/kubectl/validation/testdata/v1/validPod.yaml --patch='{"spec": {"restartPolicy":"Never"}}' -o jsonpath='{.spec.restartPolicy}' | grep -q "Never"

  ## Patch fails with error message "not patched" and exit code 1
  setvar output_message = $(! kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"replicas":7}}' 2>&1)
  kube::test::if_has_string ${output_message} 'not patched'

  ## Patch pod can change image
  # Command
  kubectl patch ${kube_flags[@]} pod valid-pod --record -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}'
  # Post-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
  # Post-condition: valid-pod has the record annotation
  kube::test::get_object_assert pods "{{range.items}}{{$annotations_field}}:{{end}}" ${change_cause_annotation}
  # prove that patch can use different types
  kubectl patch ${kube_flags[@]} pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx2"}]'
  # Post-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx2:'
  # prove that patch can use different types
  kubectl patch ${kube_flags[@]} pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]'
  # Post-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
  # prove that yaml input works too
  setvar YAML_PATCH = '$'spec:\n  containers:\n  - name: kubernetes-serve-hostname\n    image: changed-with-yaml\n''
  kubectl patch ${kube_flags[@]} pod valid-pod -p="${YAML_PATCH}"
  # Post-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:'
  ## Patch pod from JSON can change image
  # Command
  kubectl patch ${kube_flags[@]} -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.0"}]}}'
  # Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.0
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.0:'

  ## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from that is stored in the server, the Patch should be rejected
  setvar ERROR_FILE = ""${KUBE_TEMP}/conflict-error""
  ## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
  # Command
  # Needs to retry because other party may change the resource.
  for count in {0..3} {
    setvar resourceVersion = $(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
    kubectl patch ${kube_flags[@]} pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
    if grep -q "the object has been modified" ${ERROR_FILE} {
      kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
      rm ${ERROR_FILE}
      sleep $((2**count))
    } else {
      rm ${ERROR_FILE}
      kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
      break
    }
  }

  ## If the resourceVersion is the different from the one stored in the server, the patch will be rejected.
  setvar resourceVersion = $(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
  ((resourceVersion+=100))
  # Command
  kubectl patch ${kube_flags[@]} pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
  # Post-condition: should get an error reporting the conflict
  if grep -q "please apply your changes to the latest version and try again" ${ERROR_FILE} {
    kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns error as expected: $(cat ${ERROR_FILE})"
  } else {
    kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns unexpected error or non-error: $(cat ${ERROR_FILE})"
    exit 1
  }
  rm ${ERROR_FILE}

  ## --force replace pod can change other field, e.g., spec.container.name
  # Command
  kubectl get ${kube_flags[@]} pod valid-pod -o json | $SED 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > /tmp/tmp-valid-pod.json
  kubectl replace ${kube_flags[@]} --force -f /tmp/tmp-valid-pod.json
  # Post-condition: spec.container.name = "replaced-k8s-serve-hostname"
  kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'

  ## check replace --grace-period requires --force
  setvar output_message = $(! kubectl replace "${kube_flags[@]}" --grace-period=1 -f /tmp/tmp-valid-pod.json 2>&1)
  kube::test::if_has_string ${output_message} '\-\-grace-period must have \-\-force specified'

  ## check replace --timeout requires --force
  setvar output_message = $(! kubectl replace "${kube_flags[@]}" --timeout=1s -f /tmp/tmp-valid-pod.json 2>&1)
  kube::test::if_has_string ${output_message} '\-\-timeout must have \-\-force specified'

  #cleaning
  rm /tmp/tmp-valid-pod.json

  ## replace of a cluster scoped resource can succeed
  # Pre-condition: a node exists
  kubectl create -f - ${kube_flags[@]} <<< """
{
  "kind": "Node",
  "apiVersion": "v1",
  "metadata": {
    "name": "node-v1-test"
  }
}
"""
  kubectl replace -f - ${kube_flags[@]} <<< """
{
  "kind": "Node",
  "apiVersion": "v1",
  "metadata": {
    "name": "node-v1-test",
    "annotations": {"a":"b"},
    "resourceVersion": "0"
  }
}
"""

  # Post-condition: the node command succeeds
  kube::test::get_object_assert "node node-v1-test" "{{.metadata.annotations.a}}" 'b'
  kubectl delete node node-v1-test ${kube_flags[@]}

  ## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor
  echo -e "#!/bin/bash\n$SED -i \"s/nginx/gcr.io\/google_containers\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh
  chmod +x /tmp/tmp-editor.sh
  # Pre-condition: valid-pod POD has image nginx
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
  [[ "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true | grep Patch:)" ]]
  # Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:'
  # cleaning
  rm /tmp/tmp-editor.sh

  ## kubectl edit should work on Windows
  test $(EDITOR=cat kubectl edit pod/valid-pod 2>&1 | grep 'Edit cancelled')
  test $(EDITOR=cat kubectl edit pod/valid-pod | grep 'name: valid-pod')
  test $(EDITOR=cat kubectl edit --windows-line-endings pod/valid-pod | file - | grep CRLF)
  test ! $(EDITOR=cat kubectl edit --windows-line-endings=false pod/valid-pod | file - | grep CRLF)
  test $(EDITOR=cat kubectl edit ns | grep 'kind: List')

  ### Label POD YAML file locally without effecting the live pod.
  # Pre-condition: name is valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  # Command
  setvar output_message = $(kubectl label --local --overwrite -f hack/testdata/pod.yaml name=localonlyvalue -o yaml "${kube_flags[@]}")
  echo $output_message
  # Post-condition: name is still valid-pod in the live pod, but command output is the new value
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  kube::test::if_has_string ${output_message} "localonlyvalue"

  ### Overwriting an existing label is not permitted
  # Pre-condition: name is valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  # Command
  ! kubectl label pods valid-pod name=valid-pod-super-sayan ${kube_flags[@]}
  # Post-condition: name is still valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'

  ### --overwrite must be used to overwrite existing label, can be applied to all resources
  # Pre-condition: name is valid-pod
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
  # Command
  kubectl label --overwrite pods --all name=valid-pod-super-sayan ${kube_flags[@]}
  # Post-condition: name is valid-pod-super-sayan
  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan'

  ### Delete POD by label
  # Pre-condition: valid-pod POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 --force ${kube_flags[@]}
  # Post-condition: valid-pod POD doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create two PODs from 1 yaml file
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml ${kube_flags[@]}
  # Post-condition: redis-master and valid-pod PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'

  ### Delete two PODs from 1 yaml file
  # Pre-condition: redis-master and valid-pod PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
  # Command
  kubectl delete -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml ${kube_flags[@]}
  # Post-condition: no PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ## kubectl apply should update configuration annotations only if apply is already called
  ## 1. kubectl create doesn't set the annotation
  # Pre-Condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: create a pod "test-pod"
  kubectl create -f hack/testdata/pod.yaml ${kube_flags[@]}
  # Post-Condition: pod "test-pod" is created
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
  # Post-Condition: pod "test-pod" doesn't have configuration annotation
  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  ## 2. kubectl replace doesn't set the annotation
  kubectl get pods test-pod -o yaml ${kube_flags[@]} | $SED 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
  # Command: replace the pod "test-pod"
  kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml ${kube_flags[@]}
  # Post-Condition: pod "test-pod" is replaced
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
  # Post-Condition: pod "test-pod" doesn't have configuration annotation
  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  ## 3. kubectl apply does set the annotation
  # Command: apply the pod "test-pod"
  kubectl apply -f hack/testdata/pod-apply.yaml ${kube_flags[@]}
  # Post-Condition: pod "test-pod" is applied
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-applied'
  # Post-Condition: pod "test-pod" has configuration annotation
  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  kubectl get pods test-pod -o yaml ${kube_flags[@]} | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration
  ## 4. kubectl replace updates an existing annotation
  kubectl get pods test-pod -o yaml ${kube_flags[@]} | $SED 's/test-pod-applied/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
  # Command: replace the pod "test-pod"
  kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml ${kube_flags[@]}
  # Post-Condition: pod "test-pod" is replaced
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
  # Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied)
  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  kubectl get pods test-pod -o yaml ${kube_flags[@]} | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
  ! [[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null) ]]
  # Clean up
  rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
  kubectl delete pods test-pod ${kube_flags[@]}

  set +o nounset
  set +o errexit
}

# Runs tests related to kubectl apply.
proc run_kubectl_apply_tests {
  # Exercises `kubectl apply`: create-if-absent, clearing defaulted fields via
  # retainKeys, label selectors (-l), --prune (with and without --all), prune
  # fallback-to-delete for non-reapable types, and --prune-whitelist.
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl apply"
  ## kubectl apply should create the resource that doesn't exist yet
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: apply a pod "test-pod" (doesn't exist) should create this pod
  kubectl apply -f hack/testdata/pod.yaml ${kube_flags[@]}
  # Post-Condition: pod "test-pod" is created
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
  # Post-Condition: pod "test-pod" has configuration annotation
  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  # Clean up
  kubectl delete pods test-pod ${kube_flags[@]}


  ## kubectl apply should be able to clear defaulted fields.
  # Pre-Condition: no deployment exists
  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: apply a deployment "test-deployment-retainkeys" (doesn't exist) should create this deployment
  kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml ${kube_flags[@]}
  # Post-Condition: deployment "test-deployment-retainkeys" created
  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}{{end}}" 'test-deployment-retainkeys'
  # Post-Condition: deployment "test-deployment-retainkeys" has defaulted fields
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxSurge)" ]]
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxUnavailable)" ]]
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
  # Command: apply a deployment "test-deployment-retainkeys" should clear
  # defaulted fields and successfully update the deployment
  [[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]}")" ]]
  # Post-Condition: deployment "test-deployment-retainkeys" has updated fields
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep Recreate)" ]]
  ! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep hostPath)" ]]
  ! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
  # Clean up
  kubectl delete deployments test-deployment-retainkeys ${kube_flags[@]}


  ## kubectl apply -f with label selector should only apply matching objects
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply
  kubectl apply -l unique-label=bingbang -f hack/testdata/filter ${kube_flags[@]}
  # check right pod exists
  kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
  # check wrong pod doesn't exist
  setvar output_message = $(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'pods "selector-test-pod-dont-apply" not found'
  # cleanup
  kubectl delete pods selector-test-pod


  ## kubectl apply --prune
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  # apply a
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml ${kube_flags[@]}
  # check right pod exists
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  # check wrong pod doesn't exist
  setvar output_message = $(! kubectl get pods b 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'pods "b" not found'

  # apply b
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml ${kube_flags[@]}
  # check right pod exists
  kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
  # check wrong pod doesn't exist: a was pruned because it matches the selector
  # but is absent from b.yaml
  setvar output_message = $(! kubectl get pods a 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'pods "a" not found'

  # cleanup
  kubectl delete pods b

  # same thing without prune for a sanity check
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  # apply a
  kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml ${kube_flags[@]}
  # check right pod exists
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  # check wrong pod doesn't exist
  setvar output_message = $(! kubectl get pods b 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'pods "b" not found'

  # apply b
  kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml ${kube_flags[@]}
  # check both pods exist (no pruning without --prune)
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
  # check wrong pod doesn't exist

  # cleanup
  kubectl delete pod/a pod/b

  ## kubectl apply --prune requires a --all flag to select everything
  setvar output_message = $(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} \
    'all resources selected for prune without explicitly passing --all'
  # should apply everything
  kubectl apply --all --prune -f hack/testdata/prune
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
  kubectl delete pod/a pod/b

  ## kubectl apply --prune should fallback to delete for non reapable types
  # NOTE(review): these invocations previously read
  # `2>&1 ${kube_flags[@]}2>&1 "${kube_flags[@]}"`, which fused a literal `2`
  # onto the last flag and passed the flags array twice; pass it once, after
  # the redirection.
  kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]}"
  kube::test::get_object_assert 'pvc a-pvc' "{{${id_field}}}" 'a-pvc'
  kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]}"
  kube::test::get_object_assert 'pvc b-pvc' "{{${id_field}}}" 'b-pvc'
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]}"

  ## kubectl apply --prune --prune-whitelist
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply pod a
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml ${kube_flags[@]}
  # check right pod exists
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  # apply svc and don't prune pod a by overwriting whitelist
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]}"
  kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  # apply svc and prune pod a with default whitelist
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]}"
  kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # cleanup
  kubectl delete svc prune-svc 2>&1 "${kube_flags[@]}"

  # restore shell options toggled at proc entry
  set +o nounset
  set +o errexit
}

# Runs tests related to kubectl create --filename(-f) --selector(-l).
proc run_kubectl_create_filter_tests {
  # Verifies that `kubectl create -f <dir> -l <selector>` creates only the
  # objects in the directory whose labels match the selector.
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl create filter"
  ## kubectl create -f with label selector should only create matching objects
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # create
  kubectl create -l unique-label=bingbang -f hack/testdata/filter ${kube_flags[@]}
  # check right pod exists
  kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
  # check wrong pod doesn't exist
  # leading `!` inverts: the get is expected to fail; its stderr is captured
  # so the message can be asserted below.
  setvar output_message = $(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'pods "selector-test-pod-dont-apply" not found'
  # cleanup
  kubectl delete pods selector-test-pod

  # restore shell options toggled at proc entry
  set +o nounset
  set +o errexit
}

proc run_kubectl_apply_deployments_tests {
  # Verifies that `kubectl apply` propagates user-defined null values:
  # labels dropped in a re-applied deployment manifest must also be removed
  # from the live object (template labels, selector, and metadata labels).
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl apply deployments"
  ## kubectl apply should propagate user defined null values
  # Pre-Condition: no Deployments, ReplicaSets, Pods exist
  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply base deployment
  kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml ${kube_flags[@]}
  # check right deployment exists
  kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
  # check right labels exists
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'

  # apply new deployment with new template labels
  kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml ${kube_flags[@]}
  # check right labels exists
  # '<no value>' is go-template output for a missing key, i.e. l1 was removed
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l2}}" 'l2'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l2}}" 'l2'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l2}}" 'l2'

  # cleanup
  # need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
  kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
  # Post-Condition: no Deployments, ReplicaSets, Pods exist
  # wait_object_assert polls: deletion of orphaned objects is asynchronous
  kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  # restore shell options toggled at proc entry
  set +o nounset
  set +o errexit
}

# Runs tests for --save-config tests.
proc run_save_config_tests {
  # Verifies that --save-config writes the last-applied-configuration
  # annotation for each mutating verb: create, edit, replace, run, expose,
  # and autoscale; also checks HPA retrieval via autoscaling/v1 group forms.
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl --save-config"
  ## Configuration annotations should be set when --save-config is enabled
  ## 1. kubectl create --save-config should generate configuration annotation
  # Pre-Condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: create a pod "test-pod"
  kubectl create -f hack/testdata/pod.yaml --save-config ${kube_flags[@]}
  # Post-Condition: pod "test-pod" has configuration annotation
  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  # Clean up
  kubectl delete -f hack/testdata/pod.yaml ${kube_flags[@]}
  ## 2. kubectl edit --save-config should generate configuration annotation
  # Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  kubectl create -f hack/testdata/pod.yaml ${kube_flags[@]}
  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  # Command: edit the pod "test-pod"
  # Write a throwaway "editor" script that non-interactively rewrites the
  # label, so `kubectl edit` can run unattended via EDITOR.
  setvar temp_editor = ""${KUBE_TEMP}/tmp-editor.sh""
  echo -e "#!/bin/bash\n$SED -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
  chmod +x ${temp_editor}
  EDITOR=${temp_editor} kubectl edit pod test-pod --save-config ${kube_flags[@]}
  # Post-Condition: pod "test-pod" has configuration annotation
  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  # Clean up
  kubectl delete -f hack/testdata/pod.yaml ${kube_flags[@]}
  ## 3. kubectl replace --save-config should generate configuration annotation
  # Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  kubectl create -f hack/testdata/pod.yaml ${kube_flags[@]}
  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  # Command: replace the pod "test-pod"
  kubectl replace -f hack/testdata/pod.yaml --save-config ${kube_flags[@]}
  # Post-Condition: pod "test-pod" has configuration annotation
  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  # Clean up
  kubectl delete -f hack/testdata/pod.yaml ${kube_flags[@]}
  ## 4. kubectl run --save-config should generate configuration annotation
  # Pre-Condition: no RC exists
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: create the rc "nginx" with image nginx
  kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 ${kube_flags[@]}
  # Post-Condition: rc "nginx" has configuration annotation
  [[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  ## 5. kubectl expose --save-config should generate configuration annotation
  # Pre-Condition: no service exists
  kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: expose the rc "nginx"
  kubectl expose rc nginx --save-config --port=80 --target-port=8000 ${kube_flags[@]}
  # Post-Condition: service "nginx" has configuration annotation
  [[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  # Clean up
  kubectl delete rc,svc nginx
  ## 6. kubectl autoscale --save-config should generate configuration annotation
  # Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  kubectl create -f hack/testdata/frontend-controller.yaml ${kube_flags[@]}
  ! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  # Command: autoscale rc "frontend"
  kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config ${kube_flags[@]} --max=2
  # Post-Condition: hpa "frontend" has configuration annotation
  [[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  # Ensure we can interact with HPA objects in lists through autoscaling/v1 APIs
  setvar output_message = $(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'autoscaling/v1'
  setvar output_message = $(kubectl get hpa.autoscaling -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'autoscaling/v1'
  # tests kubectl group prefix matching
  setvar output_message = $(kubectl get hpa.autoscal -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'autoscaling/v1'
  # Clean up
  # Note that we should delete hpa first, otherwise it may fight with the rc reaper.
  kubectl delete hpa frontend ${kube_flags[@]}
  kubectl delete rc  frontend ${kube_flags[@]}

  # restore shell options toggled at proc entry
  set +o nounset
  set +o errexit
}

proc run_kubectl_run_tests {
  # Verifies `kubectl run` generator selection: job/v1 creates a Job,
  # the default and deployment/apps.v1beta1 generators create Deployments
  # (with old vs. new defaults), and cronjob/v1beta1 creates a CronJob.
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl run"
  ## kubectl run should create deployments, jobs or cronjob
  # Pre-Condition: no Job exists
  kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' ${kube_flags[@]}
  # Post-Condition: Job "pi" is created
  kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By"
  # Clean up
  kubectl delete jobs pi ${kube_flags[@]}
  # Post-condition: no pods exist.
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  # Pre-Condition: no Deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl run nginx-extensions "--image=$IMAGE_NGINX" ${kube_flags[@]}
  # Post-Condition: Deployment "nginx" is created
  kube::test::get_object_assert deployment.extensions "{{range.items}}{{$id_field}}:{{end}}" 'nginx-extensions:'
  # and old generator was used, iow. old defaults are applied
  setvar output_message = $(kubectl get deployment.extensions/nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_not_string ${output_message} '2'
  # Clean up
  kubectl delete deployment nginx-extensions ${kube_flags[@]}
  # Command
  kubectl run nginx-apps "--image=$IMAGE_NGINX" --generator=deployment/apps.v1beta1 ${kube_flags[@]}
  # Post-Condition: Deployment "nginx" is created
  kube::test::get_object_assert deployment.apps "{{range.items}}{{$id_field}}:{{end}}" 'nginx-apps:'
  # and new generator was used, iow. new defaults are applied
  # (the apps generator defaults revisionHistoryLimit to 2)
  setvar output_message = $(kubectl get deployment/nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_string ${output_message} '2'
  # Clean up
  kubectl delete deployment nginx-apps ${kube_flags[@]}

  # Pre-Condition: no Job exists
  kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl run pi --schedule="*/5 * * * *" --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' ${kube_flags[@]}
  # Post-Condition: CronJob "pi" is created
  kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
  # Clean up
  kubectl delete cronjobs pi ${kube_flags[@]}

  # restore shell options toggled at proc entry
  set +o nounset
  set +o errexit
}

# Exercises `kubectl get` behavior: error messages for missing objects,
# "No resources found" emission across output formats, `get all` URL fan-out,
# --allow-missing-template-keys semantics for jsonpath/go-template, watch
# mode (-w) output, and multi-item file-based get. A leading `!` inside the
# command substitution inverts the kubectl exit status so errexit is not
# tripped by expected failures while stderr is still captured via 2>&1.
proc run_kubectl_get_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl get"
  ### Test retrieval of non-existing pods
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  setvar output_message = $(! kubectl get pods abc 2>&1 "${kube_flags[@]}")
  # Post-condition: POD abc should error since it doesn't exist
  kube::test::if_has_string ${output_message} 'pods "abc" not found'

  ### Test retrieval of non-existing POD with output flag specified
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  setvar output_message = $(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o name)
  # Post-condition: POD abc should error since it doesn't exist
  kube::test::if_has_string ${output_message} 'pods "abc" not found'

  ### Test retrieval of pods when none exist with non-human readable output format flag specified
  # Pre-condition: no pods exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  setvar output_message = $(kubectl get pods 2>&1 "${kube_flags[@]}" -o json)
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string ${output_message} 'No resources found'
  # Command
  setvar output_message = $(kubectl get pods 2>&1 "${kube_flags[@]}" -o yaml)
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string ${output_message} 'No resources found'
  # Command
  setvar output_message = $(kubectl get pods 2>&1 "${kube_flags[@]}" -o name)
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string ${output_message} 'No resources found'
  # Command
  setvar output_message = $(kubectl get pods 2>&1 "${kube_flags[@]}" -o jsonpath='{.items}')
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string ${output_message} 'No resources found'
  # Command
  setvar output_message = $(kubectl get pods 2>&1 "${kube_flags[@]}" -o go-template='{{.items}}')
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string ${output_message} 'No resources found'
  # Command
  setvar output_message = $(kubectl get pods 2>&1 "${kube_flags[@]}" -o custom-columns=NAME:.metadata.name)
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string ${output_message} 'No resources found'

  ### Test retrieval of pods when none exist, with human-readable output format flag specified
  # Pre-condition: no pods exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  setvar output_message = $(kubectl get pods 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "No resources found" should be part of the output
  kube::test::if_has_string ${output_message} 'No resources found'
  # Command
  setvar output_message = $(kubectl get pods --ignore-not-found 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string ${output_message} 'No resources found'
  # Command
  setvar output_message = $(kubectl get pods 2>&1 "${kube_flags[@]}" -o wide)
  # Post-condition: The text "No resources found" should be part of the output
  kube::test::if_has_string ${output_message} 'No resources found'

  ### Test retrieval of non-existing POD with json output flag specified
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  setvar output_message = $(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o json)
  # Post-condition: POD abc should error since it doesn't exist
  kube::test::if_has_string ${output_message} 'pods "abc" not found'
  # Post-condition: make sure we don't display an empty List
  if kube::test::if_has_string ${output_message} 'List' {
    echo 'Unexpected List output'
    echo "${LINENO} $(basename $0)"
    exit 1
  }

  ### Test kubectl get all
  # --v=6 makes kubectl log each REST request + status code, which is what
  # the assertions below grep for.
  setvar output_message = $(kubectl --v=6 --namespace default get all 2>&1 "${kube_flags[@]}")
  # Post-condition: Check if we get 200 OK from all the url(s)
  kube::test::if_has_string ${output_message} "/api/v1/namespaces/default/pods 200 OK"
  kube::test::if_has_string ${output_message} "/api/v1/namespaces/default/replicationcontrollers 200 OK"
  kube::test::if_has_string ${output_message} "/api/v1/namespaces/default/services 200 OK"
  kube::test::if_has_string ${output_message} "/apis/apps/v1beta1/namespaces/default/statefulsets 200 OK"
  kube::test::if_has_string ${output_message} "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200"
  kube::test::if_has_string ${output_message} "/apis/batch/v1/namespaces/default/jobs 200 OK"
  kube::test::if_has_string ${output_message} "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK"
  kube::test::if_has_string ${output_message} "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK"

  ### Test --allow-missing-template-keys
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml ${kube_flags[@]}
  # Post-condition: valid-pod POD is created
  kubectl get ${kube_flags[@]} pods -o json
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ## check --allow-missing-template-keys defaults to true for jsonpath templates
  kubectl get ${kube_flags[@]} pod valid-pod -o jsonpath='{.missing}'

  ## check --allow-missing-template-keys defaults to true for go templates
  kubectl get ${kube_flags[@]} pod valid-pod -o go-template='{{.missing}}'

  ## check --allow-missing-template-keys=false results in an error for a missing key with jsonpath
  setvar output_message = $(! kubectl get pod valid-pod --allow-missing-template-keys=false -o jsonpath='{.missing}' "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'missing is not found'

  ## check --allow-missing-template-keys=false results in an error for a missing key with go
  setvar output_message = $(! kubectl get pod valid-pod --allow-missing-template-keys=false -o go-template='{{.missing}}' "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'map has no entry for key "missing"'

  ### Test kubectl get watch
  # --request-timeout=1 bounds each watch so the test does not hang.
  setvar output_message = $(kubectl get pods -w --request-timeout=1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'STATUS'    # headers
  kube::test::if_has_string ${output_message} 'valid-pod' # pod details
  setvar output_message = $(kubectl get pods/valid-pod -o name -w --request-timeout=1 "${kube_flags[@]}")
  kube::test::if_has_not_string ${output_message} 'STATUS' # no headers
  kube::test::if_has_string     ${output_message} 'pods/valid-pod' # resource name
  setvar output_message = $(kubectl get pods/valid-pod -o yaml -w --request-timeout=1 "${kube_flags[@]}")
  kube::test::if_has_not_string ${output_message} 'STATUS'          # no headers
  kube::test::if_has_string     ${output_message} 'name: valid-pod' # yaml
  setvar output_message = $(! kubectl get pods/invalid-pod -w --request-timeout=1 "${kube_flags[@]}" 2>&1)
  kube::test::if_has_string ${output_message} '"invalid-pod" not found'

  # cleanup
  kubectl delete pods valid-pod ${kube_flags[@]}

  ### Test 'kubectl get -f <file> -o <non default printer>' prints all the items in the file's list
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml ${kube_flags[@]}
  # Post-condition: PODs redis-master and valid-pod exist

  # Check that all items in the list are printed
  setvar output_message = $(kubectl get -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml -o jsonpath="{..metadata.name}" "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} "redis-master valid-pod"

  # cleanup
  kubectl delete pods redis-master valid-pod ${kube_flags[@]}

  # Restore lenient shell options for the caller (test harness convention).
  set +o nounset
  set +o errexit
}

# Exercises the global --request-timeout client option on `kubectl get`:
# plain get, watch mode (where the timeout is expected to cut the stream),
# a unit-less value, and an invalid unit ("1p") that must be rejected.
proc run_kubectl_request_timeout_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl request timeout"
  ### Test global request timeout option
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create ${kube_flags[@]} -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  # Post-condition: valid-pod POD is created
  kubectl get ${kube_flags[@]} pods -o json
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ## check --request-timeout on 'get pod'
  setvar output_message = $(kubectl get pod valid-pod --request-timeout=1)
  kube::test::if_has_string ${output_message} 'valid-pod'

  ## check --request-timeout on 'get pod' with --watch
  # The 1s timeout should sever the watch stream mid-read.
  setvar output_message = $(kubectl get pod valid-pod --request-timeout=1 --watch 2>&1)
  kube::test::if_has_string ${output_message} 'Timeout exceeded while reading body'

  ## check --request-timeout value with no time unit
  setvar output_message = $(kubectl get pod valid-pod --request-timeout=1 2>&1)
  kube::test::if_has_string ${output_message} 'valid-pod'

  ## check --request-timeout value with invalid time unit
  # Leading `!` inverts the expected failure so errexit is not tripped.
  setvar output_message = $(! kubectl get pod valid-pod --request-timeout="1p" 2>&1)
  kube::test::if_has_string ${output_message} 'Invalid timeout value'

  # cleanup
  kubectl delete pods valid-pod ${kube_flags[@]}

  # Restore lenient shell options for the caller (test harness convention).
  set +o nounset
  set +o errexit
}

# Creates two CustomResourceDefinitions (Foo and Bar in group company.com)
# from inline JSON, runs the non-native resource test suite against them,
# then tears the CRDs down. Uses kube_flags_with_token because CRD
# creation goes through the apiextensions API which needs the admin token.
# The JSON payloads are fed via <<< from YSH multiline (""" ... """) strings.
proc run_crd_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl crd"
  kubectl ${kube_flags_with_token[@]} create -f - <<< """
{
  "kind": "CustomResourceDefinition",
  "apiVersion": "apiextensions.k8s.io/v1beta1",
  "metadata": {
    "name": "foos.company.com"
  },
  "spec": {
    "group": "company.com",
    "version": "v1",
    "names": {
      "plural": "foos",
      "kind": "Foo"
    }
  }
}
"""

  # Post-Condition: assertion object exist
  kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'foos.company.com:'

  kubectl ${kube_flags_with_token[@]} create -f - <<< """
{
  "kind": "CustomResourceDefinition",
  "apiVersion": "apiextensions.k8s.io/v1beta1",
  "metadata": {
    "name": "bars.company.com"
  },
  "spec": {
    "group": "company.com",
    "version": "v1",
    "names": {
      "plural": "bars",
      "kind": "Bar"
    }
  }
}
"""

  # Post-Condition: assertion object exist (listed alphabetically: bars, foos)
  kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'bars.company.com:foos.company.com:'

  run_non_native_resource_tests

  # teardown
  kubectl delete customresourcedefinitions/foos.company.com ${kube_flags_with_token[@]}
  kubectl delete customresourcedefinitions/bars.company.com ${kube_flags_with_token[@]}

  # Restore lenient shell options for the caller (test harness convention).
  set +o nounset
  set +o errexit
}

# Polls until the company.com/v1 API group (and its foos/bars resources)
# is served, retrying up to `times` attempts with `wait` seconds between.
# Returns 0 as soon as all three raw GETs succeed in one pass; returns 1
# and logs an error after exhausting all attempts.
proc kube::util::non_native_resources {
  local times
  local wait
  local failed
  setvar times = '30'
  setvar wait = '10'
  local i
  for i in $(seq 1 $times) {
    # Reset the flag each round; "" "" concatenates to the empty string
    # (translation artifact — presumably meant as a single '' ; verify).
    setvar failed = """"
    # Any failing probe sets `failed`; `||` keeps errexit from firing.
    kubectl ${kube_flags[@]} get --raw '/apis/company.com/v1' || setvar failed = 'true'
    kubectl ${kube_flags[@]} get --raw '/apis/company.com/v1/foos' || setvar failed = 'true'
    kubectl ${kube_flags[@]} get --raw '/apis/company.com/v1/bars' || setvar failed = 'true'

    if test -z ${failed} {
      return 0
    }
    sleep ${wait}
  }

  kube::log::error "Timed out waiting for non-native-resources; tried ${times} waiting ${wait}s between each"
  return 1
}

# Exercises kubectl against custom resources (Foo/Bar CRDs created by
# run_crd_tests): listing, printing in every output format, patching
# (remote, --local, --record), labeling, annotating, describing, watching,
# apply (single item, lists, subfield add/update/delete), apply --prune
# with whitelists, and namespaced create/delete with cleanup waiting.
#
# Fixes over the mechanical translation:
#  - `read <&3 watch_pid<&3 watch_pid` listed the variable twice, so the
#    first copy got only the first field and the last copy the remainder
#    (empty for a lone PID line), leaving watch_pid/watch_output wrong.
#    Restored the original single-variable reads.
#  - Collapsed doubled quote pairs ""..."" (empty-string concatenation
#    artifacts) around the KUBE_TEMP paths.
proc run_non_native_resource_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl non-native resources"
  # Block until the company.com/v1 API group is actually served.
  kube::util::non_native_resources

  # Test that we can list this new CustomResource (foos)
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''

  # Test that we can list this new CustomResource (bars)
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''

  # Test that we can create a new resource of type Foo
  # NOTE(review): kube_flags is passed twice here (harmless duplication).
  kubectl ${kube_flags[@]} create -f hack/testdata/CRD/foo.yaml ${kube_flags[@]}

  # Test that we can list this new custom resource
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'

  # Test alternate forms
  kube::test::get_object_assert foo                 "{{range.items}}{{$id_field}}:{{end}}" 'test:'
  kube::test::get_object_assert foos.company.com    "{{range.items}}{{$id_field}}:{{end}}" 'test:'
  kube::test::get_object_assert foos.v1.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'

  # Test all printers, with lists and individual items
  kube::log::status "Testing CustomResource printing"
  kubectl ${kube_flags[@]} get foos
  kubectl ${kube_flags[@]} get foos/test
  kubectl ${kube_flags[@]} get foos      -o name
  kubectl ${kube_flags[@]} get foos/test -o name
  kubectl ${kube_flags[@]} get foos      -o wide
  kubectl ${kube_flags[@]} get foos/test -o wide
  kubectl ${kube_flags[@]} get foos      -o json
  kubectl ${kube_flags[@]} get foos/test -o json
  kubectl ${kube_flags[@]} get foos      -o yaml
  kubectl ${kube_flags[@]} get foos/test -o yaml
  kubectl ${kube_flags[@]} get foos      -o "jsonpath={.items[*].someField}" --allow-missing-template-keys=false
  kubectl ${kube_flags[@]} get foos/test -o "jsonpath={.someField}"          --allow-missing-template-keys=false
  kubectl ${kube_flags[@]} get foos      -o "go-template={{range .items}}{{.someField}}{{end}}" --allow-missing-template-keys=false
  kubectl ${kube_flags[@]} get foos/test -o "go-template={{.someField}}"                        --allow-missing-template-keys=false
  setvar output_message = $(kubectl "${kube_flags[@]}" get foos/test -o name)
  kube::test::if_has_string ${output_message} 'foos/test'

  # Test patching
  kube::log::status "Testing CustomResource patching"
  kubectl ${kube_flags[@]} patch foos/test -p '{"patched":"value1"}' --type=merge
  kube::test::get_object_assert foos/test "{{.patched}}" 'value1'
  kubectl ${kube_flags[@]} patch foos/test -p '{"patched":"value2"}' --type=merge --record
  kube::test::get_object_assert foos/test "{{.patched}}" 'value2'
  # A null merge-patch value deletes the key.
  kubectl ${kube_flags[@]} patch foos/test -p '{"patched":null}' --type=merge --record
  kube::test::get_object_assert foos/test "{{.patched}}" '<no value>'
  # Get local version
  setvar CRD_RESOURCE_FILE = "${KUBE_TEMP}/crd-foos-test.json"
  kubectl ${kube_flags[@]} get foos/test -o json > "${CRD_RESOURCE_FILE}"
  # cannot apply strategic patch locally
  setvar CRD_PATCH_ERROR_FILE = "${KUBE_TEMP}/crd-foos-test-error"
  ! kubectl ${kube_flags[@]} patch --local -f ${CRD_RESOURCE_FILE} -p '{"patched":"value3"}' 2> "${CRD_PATCH_ERROR_FILE}"
  if grep -q "try --type merge" ${CRD_PATCH_ERROR_FILE} {
    kube::log::status "\"kubectl patch --local\" returns error as expected for CustomResource: $(cat ${CRD_PATCH_ERROR_FILE})"
  } else {
    kube::log::status "\"kubectl patch --local\" returns unexpected error or non-error: $(cat ${CRD_PATCH_ERROR_FILE})"
    exit 1
  }
  # can apply merge patch locally
  kubectl ${kube_flags[@]} patch --local -f ${CRD_RESOURCE_FILE} -p '{"patched":"value3"}' --type=merge -o json
  # can apply merge patch remotely
  kubectl ${kube_flags[@]} patch --record -f ${CRD_RESOURCE_FILE} -p '{"patched":"value3"}' --type=merge -o json
  kube::test::get_object_assert foos/test "{{.patched}}" 'value3'
  rm ${CRD_RESOURCE_FILE}
  rm ${CRD_PATCH_ERROR_FILE}

  # Test labeling
  kube::log::status "Testing CustomResource labeling"
  kubectl ${kube_flags[@]} label foos --all listlabel=true
  kubectl ${kube_flags[@]} label foo/test itemlabel=true

  # Test annotating
  kube::log::status "Testing CustomResource annotating"
  kubectl ${kube_flags[@]} annotate foos --all listannotation=true
  kubectl ${kube_flags[@]} annotate foo/test itemannotation=true

  # Test describing
  kube::log::status "Testing CustomResource describing"
  kubectl ${kube_flags[@]} describe foos
  kubectl ${kube_flags[@]} describe foos/test
  kubectl ${kube_flags[@]} describe foos | grep listlabel=true
  kubectl ${kube_flags[@]} describe foos | grep itemlabel=true

  # Delete the resource with cascade.
  kubectl ${kube_flags[@]} delete foos test --cascade=true

  # Make sure it's gone
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''

  # Test that we can create a new resource of type Bar
  # NOTE(review): kube_flags is passed twice here (harmless duplication).
  kubectl ${kube_flags[@]} create -f hack/testdata/CRD/bar.yaml ${kube_flags[@]}

  # Test that we can list this new custom resource
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'

  # Test that we can watch the resource.
  # Start watcher in background with process substitution,
  # so we can read from stdout asynchronously.
  kube::log::status "Testing CustomResource watching"
  # FD 3 first yields the watcher's PID (from `echo $!`), then watch output.
  exec 3< <(kubectl "${kube_flags[@]}" get bars --request-timeout=1m --watch-only -o name & echo $! ; wait)
  local watch_pid
  # Fix: read the PID line into a single variable (the translation had the
  # variable and redirect duplicated, leaving watch_pid empty).
  read <&3 watch_pid

  # We can't be sure when the watch gets established,
  # so keep triggering events (in the background) until something comes through.
  local tries=0
  while test ${tries} -lt 10 {
    setvar tries = $((tries+1))
    kubectl ${kube_flags[@]} patch bars/test -p "{\"patched\":\"${tries}\"}" --type=merge
    sleep 1
  } &
  local patch_pid=$!

  # Wait up to 30s for a complete line of output.
  local watch_output
  # Fix: single-variable timed read (same duplication artifact as above).
  read <&3 -t 30 watch_output
  # Stop the watcher and the patch loop.
  kill -9 ${watch_pid}
  kill -9 ${patch_pid}
  kube::test::if_has_string ${watch_output} 'bars/test'

  # Delete the resource without cascade.
  kubectl ${kube_flags[@]} delete bars test --cascade=false

  # Make sure it's gone
  kube::test::wait_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''

  # Test that we can create single item via apply
  kubectl ${kube_flags[@]} apply -f hack/testdata/CRD/foo.yaml

  # Test that we have create a foo named test
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'

  # Test that the field has the expected value
  kube::test::get_object_assert foos/test '{{.someField}}' 'field1'

  # Test that apply an empty patch doesn't change fields
  kubectl ${kube_flags[@]} apply -f hack/testdata/CRD/foo.yaml

  # Test that the field has the same value after re-apply
  kube::test::get_object_assert foos/test '{{.someField}}' 'field1'

  # Test that apply has updated the subfield
  kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'subfield1'

  # Update a subfield and then apply the change
  kubectl ${kube_flags[@]} apply -f hack/testdata/CRD/foo-updated-subfield.yaml

  # Test that apply has updated the subfield
  kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'modifiedSubfield'

  # Test that the field has the expected value
  kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' 'subfield2'

  # Delete a subfield and then apply the change
  kubectl ${kube_flags[@]} apply -f hack/testdata/CRD/foo-deleted-subfield.yaml

  # Test that apply has deleted the field
  kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' '<no value>'

  # Test that the field does not exist
  kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' '<no value>'

  # Add a field and then apply the change
  kubectl ${kube_flags[@]} apply -f hack/testdata/CRD/foo-added-subfield.yaml

  # Test that apply has added the field
  kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' 'subfield3'

  # Delete the resource
  kubectl ${kube_flags[@]} delete -f hack/testdata/CRD/foo.yaml

  # Make sure it's gone
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''

  # Test that we can create list via apply
  kubectl ${kube_flags[@]} apply -f hack/testdata/CRD/multi-crd-list.yaml

  # Test that we have create a foo and a bar from a list
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'

  # Test that the field has the expected value
  kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
  kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'

  # Test that re-apply an list doesn't change anything
  kubectl ${kube_flags[@]} apply -f hack/testdata/CRD/multi-crd-list.yaml

  # Test that the field has the same value after re-apply
  kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
  kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'

  # Test that the fields have the expected value
  kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
  kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'

  # Update fields and then apply the change
  kubectl ${kube_flags[@]} apply -f hack/testdata/CRD/multi-crd-list-updated-field.yaml

  # Test that apply has updated the fields
  kube::test::get_object_assert foos/test-list '{{.someField}}' 'modifiedField'
  kube::test::get_object_assert bars/test-list '{{.someField}}' 'modifiedField'

  # Test that the field has the expected value
  kube::test::get_object_assert foos/test-list '{{.otherField}}' 'field2'
  kube::test::get_object_assert bars/test-list '{{.otherField}}' 'field2'

  # Delete fields and then apply the change
  kubectl ${kube_flags[@]} apply -f hack/testdata/CRD/multi-crd-list-deleted-field.yaml

  # Test that apply has deleted the fields
  kube::test::get_object_assert foos/test-list '{{.otherField}}' '<no value>'
  kube::test::get_object_assert bars/test-list '{{.otherField}}' '<no value>'

  # Test that the fields does not exist
  kube::test::get_object_assert foos/test-list '{{.newField}}' '<no value>'
  kube::test::get_object_assert bars/test-list '{{.newField}}' '<no value>'

  # Add a field and then apply the change
  kubectl ${kube_flags[@]} apply -f hack/testdata/CRD/multi-crd-list-added-field.yaml

  # Test that apply has added the field
  kube::test::get_object_assert foos/test-list '{{.newField}}' 'field3'
  kube::test::get_object_assert bars/test-list '{{.newField}}' 'field3'

  # Delete the resource
  kubectl ${kube_flags[@]} delete -f hack/testdata/CRD/multi-crd-list.yaml

  # Make sure it's gone
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''

  ## kubectl apply --prune
  # Test that no foo or bar exist
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''

  # apply --prune on foo.yaml that has foo/test
  kubectl apply --prune -l pruneGroup=true -f hack/testdata/CRD/foo.yaml ${kube_flags[@]} --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
  # check right crds exist
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''

  # apply --prune on bar.yaml that has bar/test
  kubectl apply --prune -l pruneGroup=true -f hack/testdata/CRD/bar.yaml ${kube_flags[@]} --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
  # check right crds exist
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'

  # Delete the resource
  kubectl ${kube_flags[@]} delete -f hack/testdata/CRD/bar.yaml

  # Make sure it's gone
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''

  # Test 'kubectl create' with namespace, and namespace cleanup.
  kubectl ${kube_flags[@]} create namespace non-native-resources
  kubectl ${kube_flags[@]} create -f hack/testdata/CRD/bar.yaml --namespace=non-native-resources
  kube::test::get_object_assert bars '{{len .items}}' '1' --namespace=non-native-resources
  kubectl ${kube_flags[@]} delete namespace non-native-resources
  # Make sure objects go away.
  kube::test::wait_object_assert bars '{{len .items}}' '0' --namespace=non-native-resources
  # Make sure namespace goes away.
  local tries=0
  while kubectl ${kube_flags[@]} get namespace non-native-resources && test ${tries} -lt 10 {
    setvar tries = $((tries+1))
    sleep ${tries}
  }

  # Restore lenient shell options for the caller (test harness convention).
  set +o nounset
  set +o errexit
}

proc run_recursive_resources_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing recursive resources"
  ### Create multiple busybox PODs recursively from directory of YAML files
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  setvar output_message = $(! kubectl create -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 PODs are created, and since busybox2 is malformed, it should error
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  kube::test::if_has_string ${output_message} 'error validating data: kind not set'

  ## Edit multiple busybox PODs by updating the image field of multiple PODs recursively from a directory. tmp-editor.sh is a fake editor
  # Pre-condition: busybox0 & busybox1 PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  echo -e '#!/bin/bash\nsed -i "s/image: busybox/image: prom\/busybox/g" $1' > /tmp/tmp-editor.sh
  chmod +x /tmp/tmp-editor.sh
  setvar output_message = $(! EDITOR=/tmp/tmp-editor.sh kubectl edit -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 PODs are not edited, and since busybox2 is malformed, it should error
  # The reason why busybox0 & busybox1 PODs are not edited is because the editor tries to load all objects in
  # a list but since it contains invalid objects, it will never open.
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'busybox:busybox:'
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"
  # cleaning
  rm /tmp/tmp-editor.sh

  ## Replace multiple busybox PODs recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  setvar output_message = $(! kubectl replace -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 PODs are replaced, and since busybox2 is malformed, it should error
  kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
  kube::test::if_has_string ${output_message} 'error validating data: kind not set'

  ## Describe multiple busybox PODs recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  setvar output_message = $(! kubectl describe -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 PODs are described, and since busybox2 is malformed, it should error
  kube::test::if_has_string ${output_message} "app=busybox0"
  kube::test::if_has_string ${output_message} "app=busybox1"
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"

  ## Annotate multiple busybox PODs recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  setvar output_message = $(! kubectl annotate -f hack/testdata/recursive/pod annotatekey='annotatevalue' --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 PODs are annotated, and since busybox2 is malformed, it should error
  kube::test::get_object_assert pods "{{range.items}}{{${annotations_field}.annotatekey}}:{{end}}" 'annotatevalue:annotatevalue:'
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"

  ## Apply multiple busybox PODs recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  setvar output_message = $(! kubectl apply -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 PODs are updated, and since busybox2 is malformed, it should error
  kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
  kube::test::if_has_string ${output_message} 'error validating data: kind not set'


  ### Convert deployment YAML file locally without affecting the live deployment.
  # Pre-condition: no deployments exist
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create a deployment (revision 1)
  kubectl create -f hack/testdata/deployment-revision1.yaml ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Command
  setvar output_message = $(kubectl convert --local -f hack/testdata/deployment-revision1.yaml --output-version=apps/v1beta1 -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")
  echo $output_message
  # Post-condition: apiVersion is still extensions/v1beta1 in the live deployment, but command output is the new value
  kube::test::get_object_assert 'deployment nginx' "{{ .apiVersion }}" 'extensions/v1beta1'
  kube::test::if_has_string ${output_message} "apps/v1beta1"
  # Clean up
  kubectl delete deployment nginx ${kube_flags[@]}

  ## Convert multiple busybox PODs recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  setvar output_message = $(! kubectl convert -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 PODs are converted, and since busybox2 is malformed, it should error
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"

  ## Get multiple busybox PODs recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  setvar output_message = $(! kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}" -o go-template="{{range.items}}{{$id_field}}:{{end}}")
  # Post-condition: busybox0 & busybox1 PODs are retrieved, but because busybox2 is malformed, it should not show up
  kube::test::if_has_string ${output_message} "busybox0:busybox1:"
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"

  ## Label multiple busybox PODs recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  setvar output_message = $(! kubectl label -f hack/testdata/recursive/pod mylabel='myvalue' --recursive 2>&1 "${kube_flags[@]}")
  echo $output_message
  # Post-condition: busybox0 & busybox1 PODs are labeled, but because busybox2 is malformed, it should not show up
  kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.mylabel}}:{{end}}" 'myvalue:myvalue:'
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"

  ## Patch multiple busybox PODs recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  setvar output_message = $(! kubectl patch -f hack/testdata/recursive/pod -p='{"spec":{"containers":[{"name":"busybox","image":"prom/busybox"}]}}' --recursive 2>&1 "${kube_flags[@]}")
  echo $output_message
  # Post-condition: busybox0 & busybox1 PODs are patched, but because busybox2 is malformed, it should not show up
  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:'
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"

  ### Delete multiple busybox PODs recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 PODs exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  setvar output_message = $(! kubectl delete -f hack/testdata/recursive/pod --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 PODs are deleted, and since busybox2 is malformed, it should error
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"

  ### Create replication controller recursively from directory of YAML files
  # Pre-condition: no replication controller exists
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  ! kubectl create -f hack/testdata/recursive/rc --recursive ${kube_flags[@]}
  # Post-condition: frontend replication controller is created
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'

  ### Autoscale multiple replication controllers recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 replication controllers exist & 1
  # replica each
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
  kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
  # Command
  setvar output_message = $(! kubectl autoscale --min=1 --max=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox replication controllers are autoscaled
  # with min. of 1 replica & max of 2 replicas, and since busybox2 is malformed, it should error
  kube::test::get_object_assert 'hpa busybox0' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
  kube::test::get_object_assert 'hpa busybox1' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"
  kubectl delete hpa busybox0 ${kube_flags[@]}
  kubectl delete hpa busybox1 ${kube_flags[@]}

  ### Expose multiple replication controllers as service recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 replication controllers exist & 1
  # replica each
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
  kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
  # Command
  setvar output_message = $(! kubectl expose -f hack/testdata/recursive/rc --recursive --port=80 2>&1 "${kube_flags[@]}")
  # Post-condition: service exists and the port is unnamed
  kube::test::get_object_assert 'service busybox0' "{{$port_name}} {{$port_field}}" '<no value> 80'
  kube::test::get_object_assert 'service busybox1' "{{$port_name}} {{$port_field}}" '<no value> 80'
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"

  ### Scale multiple replication controllers recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 replication controllers exist & 1
  # replica each
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
  kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
  # Command
  setvar output_message = $(! kubectl scale --current-replicas=1 --replicas=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 replication controllers are scaled to 2 replicas, and since busybox2 is malformed, it should error
  kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '2'
  kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '2'
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"

  ### Delete multiple busybox replication controllers recursively from directory of YAML files
  # Pre-condition: busybox0 & busybox1 PODs exist
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  setvar output_message = $(! kubectl delete -f hack/testdata/recursive/rc --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 replication controllers are deleted, and since busybox2 is malformed, it should error
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"

  ### Rollout on multiple deployments recursively
  # Pre-condition: no deployments exist
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create deployments (revision 1) recursively from directory of YAML files
  ! kubectl create -f hack/testdata/recursive/deployment --recursive ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
  ## Rollback the deployments to revision 1 recursively
  setvar output_message = $(! kubectl rollout undo -f hack/testdata/recursive/deployment --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
  # Post-condition: nginx0 & nginx1 should be a no-op, and since nginx2 is malformed, it should error
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"
  ## Pause the deployments recursively
  setvar PRESERVE_ERR_FILE = 'true'
  kubectl-with-retry rollout pause -f hack/testdata/recursive/deployment --recursive ${kube_flags[@]}
  setvar output_message = $(cat ${ERROR_FILE})
  # Post-condition: nginx0 & nginx1 should both have paused set to true, and since nginx2 is malformed, it should error
  kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "true:true:"
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"
  ## Resume the deployments recursively
  kubectl-with-retry rollout resume -f hack/testdata/recursive/deployment --recursive ${kube_flags[@]}
  setvar output_message = $(cat ${ERROR_FILE})
  # Post-condition: nginx0 & nginx1 should both have paused set to nothing, and since nginx2 is malformed, it should error
  kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "<no value>:<no value>:"
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"
  ## Retrieve the rollout history of the deployments recursively
  setvar output_message = $(! kubectl rollout history -f hack/testdata/recursive/deployment --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: nginx0 & nginx1 should both have a history, and since nginx2 is malformed, it should error
  kube::test::if_has_string ${output_message} "nginx0-deployment"
  kube::test::if_has_string ${output_message} "nginx1-deployment"
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"
  # Clean up
  unset PRESERVE_ERR_FILE
  rm ${ERROR_FILE}
  ! kubectl delete -f hack/testdata/recursive/deployment --recursive ${kube_flags[@]} --grace-period=0 --force
  sleep 1

  ### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
  # Pre-condition: no replication controller exists
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create replication controllers recursively from directory of YAML files
  ! kubectl create -f hack/testdata/recursive/rc --recursive ${kube_flags[@]}
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
  # Command
  ## Attempt to rollback the replication controllers to revision 1 recursively
  setvar output_message = $(! kubectl rollout undo -f hack/testdata/recursive/rc --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
  kube::test::if_has_string ${output_message} 'no rollbacker has been implemented for {"" "ReplicationController"}'
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"
  ## Attempt to pause the replication controllers recursively
  setvar output_message = $(! kubectl rollout pause -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"
  kube::test::if_has_string ${output_message} 'replicationcontrollers "busybox0" pausing is not supported'
  kube::test::if_has_string ${output_message} 'replicationcontrollers "busybox1" pausing is not supported'
  ## Attempt to resume the replication controllers recursively
  setvar output_message = $(! kubectl rollout resume -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
  # Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
  kube::test::if_has_string ${output_message} "Object 'Kind' is missing"
  kube::test::if_has_string ${output_message} 'replicationcontrollers "busybox0" resuming is not supported'
  kube::test::if_has_string ${output_message} 'replicationcontrollers "busybox0" resuming is not supported'
  # Clean up
  ! kubectl delete -f hack/testdata/recursive/rc --recursive ${kube_flags[@]} --grace-period=0 --force
  sleep 1

  set +o nounset
  set +o errexit
}

proc run_namespace_tests {
  # Exercise kubectl namespace operations against a live test apiserver:
  # create/get/delete a namespace, then create/get/delete a pod scoped to a
  # specific namespace (covering both --namespace=... and the -n shorthand).
  # Runs with nounset/errexit enabled; both are restored before returning.
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl(v1:namespaces)"
  ### Create a new namespace
  # Pre-condition: only the "default" namespace exists
  # The Pre-condition doesn't hold anymore after we create and switch namespaces before creating pods with same name in the test.
  # kube::test::get_object_assert namespaces "{{range.items}}{{$id_field}}:{{end}}" 'default:'
  # Command
  kubectl create namespace my-namespace
  # Post-condition: namespace 'my-namespace' is created.
  kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
  # Clean up
  kubectl delete namespace my-namespace

  ######################
  # Pods in Namespaces #
  ######################

  # Pod-scoped sub-tests only run when the cluster under test serves the pod resource.
  if kube::test::if_supports_resource ${pods}  {
    ### Create a new namespace
    # Pre-condition: the other namespace does not exist
    kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"other\" }}found{{end}}{{end}}:' ':'
    # Command
    kubectl create namespace other
    # Post-condition: namespace 'other' is created.
    kube::test::get_object_assert 'namespaces/other' "{{$id_field}}" 'other'

    ### Create POD valid-pod in specific namespace
    # Pre-condition: no POD exists
    kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
    # Command
    kubectl create ${kube_flags[@]} --namespace=other -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
    # Post-condition: valid-pod POD is created
    kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
    # Post-condition: verify shorthand `-n other` has the same results as `--namespace=other`
    kube::test::get_object_assert 'pods -n other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
    # Post-condition: a resource cannot be retrieved by name across all namespaces
    # (leading `!` inverts the exit status: the command is expected to FAIL,
    # and its stderr output is captured for the string assertion below)
    setvar output_message = $(! kubectl get "${kube_flags[@]}" pod valid-pod --all-namespaces 2>&1)
    kube::test::if_has_string ${output_message} "a resource cannot be retrieved by name across all namespaces"

    ### Delete POD valid-pod in specific namespace
    # Pre-condition: valid-pod POD exists
    kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
    # Command
    kubectl delete ${kube_flags[@]} pod --namespace=other valid-pod --grace-period=0 --force
    # Post-condition: valid-pod POD doesn't exist
    kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
    # Clean up
    kubectl delete namespace other
  }

  # Restore the shell options set at the top of this proc.
  set +o nounset
  set +o errexit
}

proc run_secrets_test {
  # Exercise kubectl secret handling inside a dedicated 'test-secrets'
  # namespace: generic, docker-registry, and tls secret creation; a secret
  # created from stdin that mixes `data` and `stringData`; and secret
  # creation with go-template output flags.  The namespace (and every secret
  # created here) is deleted before returning.
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing secrets"
  ### Create a new namespace
  # Pre-condition: the test-secrets namespace does not exist
  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-secrets\" }}found{{end}}{{end}}:' ':'
  # Command
  kubectl create namespace test-secrets
  # Post-condition: namespace 'test-secrets' is created.
  kube::test::get_object_assert 'namespaces/test-secrets' "{{$id_field}}" 'test-secrets'

  ### Create a generic secret in a specific namespace
  # Pre-condition: no SECRET exists
  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create secret generic test-secret --from-literal=key1=value1 --type=test-type --namespace=test-secrets
  # Post-condition: secret exists and has expected values
  # ('dmFsdWUx' is base64("value1"); secret data is stored base64-encoded)
  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
  [[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep 'key1: dmFsdWUx')" ]]
  # Clean-up
  kubectl delete secret test-secret --namespace=test-secrets

  ### Create a docker-registry secret in a specific namespace
  # Optionally wait until the previous secret's deletion is observed before re-asserting emptiness.
  if [[ "${WAIT_FOR_DELETION:-}" == "true" ]] {
    kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  }
  # Pre-condition: no SECRET exists
  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='test-user@test.com' --namespace=test-secrets
  # Post-condition: secret exists and has expected values
  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockercfg'
  [[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockercfg:')" ]]
  # Clean-up
  kubectl delete secret test-secret --namespace=test-secrets

  ### Create a tls secret
  if [[ "${WAIT_FOR_DELETION:-}" == "true" ]] {
    kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  }
  # Pre-condition: no SECRET exists
  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create secret tls test-secret --namespace=test-secrets --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt
  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
  # Clean-up
  kubectl delete secret test-secret --namespace=test-secrets

  # Create a secret using stringData
  # (JSON is piped over stdin via a multi-line string; 'djE=' is base64("v1"))
  kubectl create --namespace=test-secrets -f - ${kube_flags[@]} <<< """
{
  "kind": "Secret",
  "apiVersion": "v1",
  "metadata": {
    "name": "secret-string-data"
  },
  "data": {
    "k1":"djE=",
    "k2":""
  },
  "stringData": {
    "k2":"v2"
  }
}
"""
  # Post-condition: secret-string-data secret is created with expected data, merged/overridden data from stringData, and a cleared stringData field
  # ('djI=' is base64("v2"): the stringData entry overrode the empty data.k2)
  kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k1:djE=.*'
  kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k2:djI=.*'
  kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.stringData}}' '<no value>'
  # Clean up
  kubectl delete secret secret-string-data --namespace=test-secrets

  ### Create a secret using output flags
  if [[ "${WAIT_FOR_DELETION:-}" == "true" ]] {
    kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  }
  # Pre-condition: no secret exists
  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # The go-template output of `create` itself should contain the new secret's name.
  [[ "$(kubectl create secret generic test-secret --namespace=test-secrets --from-literal=key1=value1 --output=go-template --template=\"{{.metadata.name}}:\" | grep 'test-secret:')" ]]
  ## Clean-up
  kubectl delete secret test-secret --namespace=test-secrets
  # Clean up
  kubectl delete namespace test-secrets

  # Restore the shell options set at the top of this proc.
  set +o nounset
  set +o errexit
}

proc run_configmap_tests {
  # Exercise kubectl configmap operations: create from a YAML fixture in the
  # current namespace, then create a configmap from literals inside a
  # dedicated 'test-configmaps' namespace and verify its contents.  All
  # created objects and the extra namespace are deleted before returning.
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing configmaps"
  # Create from fixture, assert it is listed, then remove it.
  kubectl create -f test/fixtures/doc-yaml/user-guide/configmap/configmap.yaml
  kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}{{end}}" 'test-configmap'
  kubectl delete configmap test-configmap ${kube_flags[@]}

  ### Create a new namespace
  # Pre-condition: the test-configmaps namespace does not exist
  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-configmaps\" }}found{{end}}{{end}}:' ':'
  # Command
  kubectl create namespace test-configmaps
  # Post-condition: namespace 'test-configmaps' is created.
  kube::test::get_object_assert 'namespaces/test-configmaps' "{{$id_field}}" 'test-configmaps'

  ### Create a generic configmap in a specific namespace
  # Pre-condition: no configmaps namespace exists
  kube::test::get_object_assert 'configmaps --namespace=test-configmaps' "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
  # Post-condition: configmap exists and has expected values
  # (configmap data is stored as plain text, so grep for the literal value)
  kube::test::get_object_assert 'configmap/test-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-configmap'
  [[ "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}" | grep 'key1: value1')" ]]
  # Clean-up
  kubectl delete configmap test-configmap --namespace=test-configmaps
  kubectl delete namespace test-configmaps

  # Restore the shell options set at the top of this proc.
  set +o nounset
  set +o errexit
}

proc run_service_tests {
  # Exercise kubectl service operations in the default namespace: create from
  # YAML/JSON/stdin, describe (with and without events), `set selector`
  # (--local / --dry-run / live), dump-and-recreate round trips, multi-delete,
  # custom-column output, and ExternalName services.  Ends with only the
  # default 'kubernetes' service remaining.
  set -o nounset
  set -o errexit

  # switch back to the default namespace
  kubectl config set-context ${CONTEXT} --namespace=""
  kube::log::status "Testing kubectl(v1:services)"

  ### Create redis-master service from JSON
  # Pre-condition: Only the default kubernetes services exist
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  # Command
  kubectl create -f examples/guestbook/redis-master-service.yaml ${kube_flags[@]}
  # Post-condition: redis-master service exists
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  # Describe command should print detailed information
  kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
  # Describe command should print events information by default
  kube::test::describe_object_events_assert services 'redis-master'
  # Describe command should not print events information when show-events=false
  kube::test::describe_object_events_assert services 'redis-master' false
  # Describe command should print events information when show-events=true
  kube::test::describe_object_events_assert services 'redis-master' true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
  # Describe command should print events information by default
  kube::test::describe_resource_events_assert services
  # Describe command should not print events information when show-events=false
  kube::test::describe_resource_events_assert services false
  # Describe command should print events information when show-events=true
  kube::test::describe_resource_events_assert services true

  ### set selector
  # prove role=master
  kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"

  # Set selector of a local file without talking to the server
  kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan --local -o yaml ${kube_flags[@]}
  # Expected to fail (leading `!` inverts the exit status): presumably
  # --dry-run is rejected when operating on a file — TODO confirm.
  ! kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml ${kube_flags[@]}
  # Set command to change the selector.
  kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan
  # prove role=padawan
  kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "padawan:"
  # Set command to reset the selector back to the original one.
  kubectl set selector -f examples/guestbook/redis-master-service.yaml app=redis,role=master,tier=backend
  # prove role=master
  kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
  # Show dry-run works on running selector
  kubectl set selector services redis-master role=padawan --dry-run -o yaml ${kube_flags[@]}
  # Expected to fail: the mirror case — presumably --local is rejected when
  # addressing a live resource by name — TODO confirm.
  ! kubectl set selector services redis-master role=padawan --local -o yaml ${kube_flags[@]}
  # Neither dry-run nor the failed --local call changed the live selector.
  kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"

  ### Dump current redis-master service
  setvar output_service = $(kubectl get service redis-master -o json --output-version=v1 "${kube_flags[@]}")

  ### Delete redis-master-service by id
  # Pre-condition: redis-master service exists
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  # Command
  kubectl delete service redis-master ${kube_flags[@]}
  if [[ "${WAIT_FOR_DELETION:-}" == "true" ]] {
    kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  }
  # Post-condition: Only the default kubernetes services exist
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'

  ### Create redis-master-service from dumped JSON
  # Pre-condition: Only the default kubernetes services exist
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  # Command
  # Re-create the service from the JSON captured above, fed over stdin.
  echo ${output_service} | kubectl create -f - ${kube_flags[@]}
  # Post-condition: redis-master service is created
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'

  ### Create redis-master-v1-test service
  # Pre-condition: redis-master-service service exists
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
  # Command
  # Create a minimal TCP service from inline JSON over stdin.
  kubectl create -f - ${kube_flags[@]} <<< """
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {
    "name": "service-v1-test"
  },
  "spec": {
    "ports": [
      {
        "protocol": "TCP",
        "port": 80,
        "targetPort": 80
      }
    ]
  }
}
"""
  # Post-condition: service-v1-test service is created
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'

  ### Identity
  # get | replace round trip: replacing an object with its own serialization
  # must succeed without modification.
  kubectl get service ${kube_flags[@]} service-v1-test -o json | kubectl replace ${kube_flags[@]} -f -

  ### Delete services by id
  # Pre-condition: service-v1-test exists
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
  # Command
  kubectl delete service redis-master ${kube_flags[@]}
  kubectl delete service "service-v1-test" ${kube_flags[@]}
  if [[ "${WAIT_FOR_DELETION:-}" == "true" ]] {
    kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  }
  # Post-condition: Only the default kubernetes services exist
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'

  ### Create two services
  # Pre-condition: Only the default kubernetes services exist
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  # Command
  kubectl create -f examples/guestbook/redis-master-service.yaml ${kube_flags[@]}
  kubectl create -f examples/guestbook/redis-slave-service.yaml ${kube_flags[@]}
  # Post-condition: redis-master and redis-slave services are created
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'

  ### Custom columns can be specified
  # Pre-condition: generate output using custom columns
  setvar output_message = $(kubectl get services -o=custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion 2>&1 "${kube_flags[@]}")
  # Post-condition: should contain name column
  kube::test::if_has_string ${output_message} 'redis-master'

  ### Delete multiple services at once
  # Pre-condition: redis-master and redis-slave services exist
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
  # Command
  kubectl delete services redis-master redis-slave ${kube_flags[@]} # delete multiple services at once
  if [[ "${WAIT_FOR_DELETION:-}" == "true" ]] {
    kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  }
  # Post-condition: Only the default kubernetes services exist
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'

  ### Create an ExternalName service
  # Pre-condition: Only the default kubernetes service exist
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  # Command
  kubectl create service externalname beep-boop --external-name bar.com
  # Post-condition: beep-boop service is created
  # (names are listed alphabetically, so beep-boop sorts before kubernetes)
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'

  ### Delete beep-boop service by id
  # Pre-condition: beep-boop service exists
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
  # Command
  kubectl delete service beep-boop ${kube_flags[@]}
  if [[ "${WAIT_FOR_DELETION:-}" == "true" ]] {
    kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
  }
  # Post-condition: Only the default kubernetes services exist
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'

  # Restore the shell options set at the top of this proc.
  set +o nounset
  set +o errexit
}

proc run_rc_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:replicationcontrollers)"

  ### Create and stop controller, make sure it doesn't leak pods
  # Pre-condition: no replication controller exists
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-controller.yaml ${kube_flags[@]}
  kubectl delete rc frontend ${kube_flags[@]}
  # Post-condition: no pods from frontend controller
  kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create replication controller frontend from JSON
  # Pre-condition: no replication controller exists
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-controller.yaml ${kube_flags[@]}
  # Post-condition: frontend replication controller is created
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  # Describe command should print detailed information
  kube::test::describe_object_assert rc 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
  # Describe command should print events information by default
  kube::test::describe_object_events_assert rc 'frontend'
  # Describe command should not print events information when show-events=false
  kube::test::describe_object_events_assert rc 'frontend' false
  # Describe command should print events information when show-events=true
  kube::test::describe_object_events_assert rc 'frontend' true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert rc "Name:" "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
  # Describe command should print events information by default
  kube::test::describe_resource_events_assert rc
  # Describe command should not print events information when show-events=false
  kube::test::describe_resource_events_assert rc false
  # Describe command should print events information when show-events=true
  kube::test::describe_resource_events_assert rc true

  ### Scale replication controller frontend with current-replicas and replicas
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
  # Command
  kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend ${kube_flags[@]}
  # Post-condition: 2 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'

  ### Scale replication controller frontend with (wrong) current-replicas and replicas
  # Pre-condition: 2 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  # Command
  ! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend ${kube_flags[@]}
  # Post-condition: nothing changed
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'

  ### Scale replication controller frontend with replicas only
  # Pre-condition: 2 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  # Command
  kubectl scale  --replicas=3 replicationcontrollers frontend ${kube_flags[@]}
  # Post-condition: 3 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'

  ### Scale replication controller from JSON with replicas only
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
  # Command
  kubectl scale  --replicas=2 -f hack/testdata/frontend-controller.yaml ${kube_flags[@]}
  # Post-condition: 2 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
  # Clean-up
  kubectl delete rc frontend ${kube_flags[@]}

  ### Scale multiple replication controllers
  kubectl create -f examples/guestbook/legacy/redis-master-controller.yaml ${kube_flags[@]}
  kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml ${kube_flags[@]}
  # Command
  kubectl scale rc/redis-master rc/redis-slave --replicas=4 ${kube_flags[@]}
  # Post-condition: 4 replicas each
  kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
  kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
  # Clean-up
  kubectl delete rc redis-{master,slave} ${kube_flags[@]}

  ### Scale a job
  kubectl create -f test/fixtures/doc-yaml/user-guide/job.yaml ${kube_flags[@]}
  # Command
  kubectl scale --replicas=2 job/pi
  # Post-condition: 2 replicas for pi
  kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
  # Clean-up
  kubectl delete job/pi ${kube_flags[@]}

  ### Scale a deployment
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml ${kube_flags[@]}
  # Command
  kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
  # Post-condition: 1 replica for nginx-deployment
  kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
  # Clean-up
  kubectl delete deployment/nginx-deployment ${kube_flags[@]}

  ### Expose a deployment as a service
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml ${kube_flags[@]}
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '3'
  # Command
  kubectl expose deployment/nginx-deployment
  # Post-condition: service exists and exposes deployment port (80)
  kube::test::get_object_assert 'service nginx-deployment' "{{$port_field}}" '80'
  # Clean-up
  kubectl delete deployment/nginx-deployment service/nginx-deployment ${kube_flags[@]}

  ### Expose replication controller as service
  kubectl create -f hack/testdata/frontend-controller.yaml ${kube_flags[@]}
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
  # Command
  kubectl expose rc frontend --port=80 ${kube_flags[@]}
  # Post-condition: service exists and the port is unnamed
  kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
  # Command
  kubectl expose service frontend --port=443 --name=frontend-2 ${kube_flags[@]}
  # Post-condition: service exists and the port is unnamed
  kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" '<no value> 443'
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml ${kube_flags[@]}
  kubectl expose pod valid-pod --port=444 --name=frontend-3 ${kube_flags[@]}
  # Post-condition: service exists and the port is unnamed
  kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" '<no value> 444'
  # Create a service using service/v1 generator
  kubectl expose rc frontend --port=80 --name=frontend-4 --generator=service/v1 ${kube_flags[@]}
  # Post-condition: service exists and the port is named default.
  kube::test::get_object_assert 'service frontend-4' "{{$port_name}} {{$port_field}}" 'default 80'
  # Verify that expose service works without specifying a port.
  kubectl expose service frontend --name=frontend-5 ${kube_flags[@]}
  # Post-condition: service exists with the same port as the original service.
  kube::test::get_object_assert 'service frontend-5' "{{$port_field}}" '80'
  # Cleanup services
  kubectl delete pod valid-pod ${kube_flags[@]}
  kubectl delete service frontend{,-2,-3,-4,-5} ${kube_flags[@]}

  ### Expose negative invalid resource test
  # Pre-condition: don't need
  # Command
  setvar output_message = $(! kubectl expose nodes 127.0.0.1 2>&1 "${kube_flags[@]}")
  # Post-condition: the error message has "cannot expose" string
  kube::test::if_has_string ${output_message} 'cannot expose'

  ### Try to generate a service with invalid name (exceeding maximum valid size)
  # Pre-condition: use --name flag
  setvar output_message = $(! kubectl expose -f hack/testdata/pod-with-large-name.yaml --name=invalid-large-service-name-that-has-more-than-sixty-three-characters --port=8081 2>&1 "${kube_flags[@]}")
  # Post-condition: should fail due to invalid name
  kube::test::if_has_string ${output_message} 'metadata.name: Invalid value'
  # Pre-condition: default run without --name flag; should succeed by truncating the inherited name
  setvar output_message = $(kubectl expose -f hack/testdata/pod-with-large-name.yaml --port=8081 2>&1 "${kube_flags[@]}")
  # Post-condition: inherited name from pod has been truncated
  kube::test::if_has_string ${output_message} '\"kubernetes-serve-hostname-testing-sixty-three-characters-in-len\" exposed'
  # Clean-up
  kubectl delete svc kubernetes-serve-hostname-testing-sixty-three-characters-in-len ${kube_flags[@]}

  ### Expose multiport object as a new service
  # Pre-condition: don't use --port flag
  setvar output_message = $(kubectl expose -f test/fixtures/doc-yaml/admin/high-availability/etcd.yaml --selector=test=etcd 2>&1 "${kube_flags[@]}")
  # Post-condition: expose succeeded
  kube::test::if_has_string ${output_message} '\"etcd-server\" exposed'
  # Post-condition: generated service has both ports from the exposed pod
  kube::test::get_object_assert 'service etcd-server' "{{$port_name}} {{$port_field}}" 'port-1 2380'
  kube::test::get_object_assert 'service etcd-server' "{{$second_port_name}} {{$second_port_field}}" 'port-2 2379'
  # Clean-up
  kubectl delete svc etcd-server ${kube_flags[@]}

  ### Delete replication controller with id
  # Pre-condition: frontend replication controller exists
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  # Command
  kubectl delete rc frontend ${kube_flags[@]}
  # Post-condition: no replication controller exists
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create two replication controllers
  # Pre-condition: no replication controller exists
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-controller.yaml ${kube_flags[@]}
  kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml ${kube_flags[@]}
  # Post-condition: frontend and redis-slave
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'

  ### Delete multiple controllers at once
  # Pre-condition: frontend and redis-slave
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
  # Command
  kubectl delete rc frontend redis-slave ${kube_flags[@]} # delete multiple controllers at once
  # Post-condition: no replication controller exists
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Auto scale replication controller
  # Pre-condition: no replication controller exists
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-controller.yaml ${kube_flags[@]}
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  # autoscale 1~2 pods, CPU utilization 70%, rc specified by file
  kubectl autoscale -f hack/testdata/frontend-controller.yaml ${kube_flags[@]} --max=2 --cpu-percent=70
  kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
  kubectl delete hpa frontend ${kube_flags[@]}
  # autoscale 2~3 pods, no CPU utilization specified, rc specified by name
  kubectl autoscale rc frontend ${kube_flags[@]} --min=2 --max=3
  kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
  kubectl delete hpa frontend ${kube_flags[@]}
  # autoscale without specifying --max should fail
  ! kubectl autoscale rc frontend ${kube_flags[@]}
  # Clean up
  kubectl delete rc frontend ${kube_flags[@]}

  ## Set resource limits/request of a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Set resources of a local file without talking to the server
  kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml ${kube_flags[@]}
  ! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml ${kube_flags[@]}
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set the deployment's cpu limits
  kubectl set resources deployment nginx-deployment-resources --limits=cpu=100m ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
  kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
  # Set a non-existing container should fail
  ! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m
  # Set the limit of a specific container in deployment
  kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
  kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
  # Set limits/requests of a deployment specified by a file
  kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
  kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
  kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
  # Show dry-run works on running deployments
  kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run -o yaml ${kube_flags[@]}
  ! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
  kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
  kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
  # Clean up
  kubectl delete deployment nginx-deployment-resources ${kube_flags[@]}

  set +o nounset
  set +o errexit
}

# Exercise kubectl deployment behavior end-to-end against a live test cluster:
# generators, cascading deletion, autoscale, rollout/rollback, set image/env,
# and initializer deletion. Relies on globals set by the harness
# (kube_flags, id_field, image_field*, rs_replicas_field, hpa_*_field,
# container_name_field, IMAGE_DEPLOYMENT_R1/R2, IMAGE_PERL).
proc run_deployment_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing deployments"
  # Test kubectl create deployment (using default - old generator)
  kubectl create deployment test-nginx-extensions --image=gcr.io/google-containers/nginx:test-cmd
  # Post-Condition: Deployment "nginx" is created.
  kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
  # and old generator was used, iow. old defaults are applied
  setvar output_message = $(kubectl get deployment.extensions/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_not_string ${output_message} '2'
  # Ensure we can interact with deployments through extensions and apps endpoints
  setvar output_message = $(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'extensions/v1beta1'
  setvar output_message = $(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'apps/v1beta1'
  # Clean up
  kubectl delete deployment test-nginx-extensions ${kube_flags[@]}

  # Test kubectl create deployment
  kubectl create deployment test-nginx-apps --image=gcr.io/google-containers/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
  # Post-Condition: Deployment "nginx" is created.
  kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
  # and new generator was used, iow. new defaults are applied
  setvar output_message = $(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_string ${output_message} '2'
  # Ensure we can interact with deployments through extensions and apps endpoints
  setvar output_message = $(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'extensions/v1beta1'
  setvar output_message = $(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} 'apps/v1beta1'
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By" "Controlled By"
  # Clean up
  kubectl delete deployment test-nginx-apps ${kube_flags[@]}

  ### Test kubectl create deployment should not fail validation
  # Pre-Condition: No deployment exists.
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml ${kube_flags[@]}
  # Post-Condition: Deployment "deployment-with-unixuserid" is created.
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'deployment-with-unixuserid:'
  # Clean up
  kubectl delete deployment deployment-with-unixuserid ${kube_flags[@]}

  ### Test cascading deletion
  ## Test that rs is deleted when deployment is deleted.
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create deployment
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml ${kube_flags[@]}
  # Wait for rs to come up.
  kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
  # Deleting the deployment should delete the rs.
  kubectl delete deployment nginx-deployment ${kube_flags[@]}
  kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''

  ## Test that rs is not deleted when deployment is deleted with cascade set to false.
  # Pre-condition: no deployment and rs exist
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create deployment
  kubectl create deployment nginx-deployment --image=gcr.io/google-containers/nginx:test-cmd
  # Wait for rs to come up.
  kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
  # Delete the deployment with cascade set to false.
  kubectl delete deployment nginx-deployment ${kube_flags[@]} --cascade=false
  # Wait for the deployment to be deleted and then verify that rs is not
  # deleted.
  kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
  # Cleanup
  # Find the name of the rs to be deleted.
  setvar output_message = $(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}})
  kubectl delete rs ${output_message} ${kube_flags[@]}

  ### Auto scale deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
  # autoscale 2~3 pods, no CPU utilization specified
  kubectl-with-retry autoscale deployment nginx-deployment ${kube_flags[@]} --min=2 --max=3
  kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
  # Clean up
  # Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
  kubectl delete hpa nginx-deployment ${kube_flags[@]}
  kubectl delete deployment.extensions nginx-deployment ${kube_flags[@]}

  ### Rollback a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create a deployment (revision 1)
  kubectl create -f hack/testdata/deployment-revision1.yaml ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo deployment nginx --to-revision=1 ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Update the deployment (revision 2)
  kubectl apply -f hack/testdata/deployment-revision2.yaml ${kube_flags[@]}
  kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo deployment nginx --dry-run=true ${kube_flags[@]} | grep "test-cmd"
  kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Rollback to revision 1
  kubectl rollout undo deployment nginx --to-revision=1 ${kube_flags[@]}
  sleep 1
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to revision 1000000 - should be no-op
  kubectl rollout undo deployment nginx --to-revision=1000000 ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to last revision
  kubectl rollout undo deployment nginx ${kube_flags[@]}
  sleep 1
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Pause the deployment
  kubectl-with-retry rollout pause deployment nginx ${kube_flags[@]}
  # A paused deployment cannot be rolled back
  ! kubectl rollout undo deployment nginx ${kube_flags[@]}
  # Resume the deployment
  kubectl-with-retry rollout resume deployment nginx ${kube_flags[@]}
  # The resumed deployment can now be rolled back
  kubectl rollout undo deployment nginx ${kube_flags[@]}
  # Check that the new replica set has all old revisions stored in an annotation
  setvar newrs = "$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
  kubectl get rs ${newrs} -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
  # Check that trying to watch the status of a superseded revision returns an error
  ! kubectl rollout status deployment/nginx --revision=3
  cat hack/testdata/deployment-revision1.yaml | $SED "s/name: nginx$/name: nginx2/" | kubectl create -f - ${kube_flags[@]}
  # Deletion of both deployments should not be blocked
  kubectl delete deployment nginx2 ${kube_flags[@]}
  # Clean up
  kubectl delete deployment nginx ${kube_flags[@]}

  ### Set image of a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer.yaml ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set the deployment's image
  kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set non-existing container should fail
  ! kubectl set image deployment nginx-deployment redis=redis ${kube_flags[@]}
  # Set image of deployments without specifying name
  kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of a deployment specified by file
  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of a local file without talking to the server
  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" ${kube_flags[@]} --local -o yaml
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of all containers of the deployment
  kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Set image of all containers of the deployment again when the image does not change
  kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Clean up
  kubectl delete deployment nginx-deployment ${kube_flags[@]}

  ### Set env of a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer.yaml ${kube_flags[@]}
  kubectl create -f hack/testdata/configmap.yaml ${kube_flags[@]}
  kubectl create -f hack/testdata/secret.yaml ${kube_flags[@]}
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
  kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-config:'
  kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:'
  # Set env of deployments for all container
  kubectl set env deployment nginx-deployment env=prod ${kube_flags[@]}
  # Set env of deployments for specific container
  kubectl set env deployment nginx-deployment env=prod -c=nginx ${kube_flags[@]}
  # Set env of deployments by configmap
  kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config ${kube_flags[@]}
  # Set env of deployments by secret
  kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret ${kube_flags[@]}
  # Remove specific env of deployment
  kubectl set env deployment nginx-deployment env-
  # Clean up
  kubectl delete deployment nginx-deployment ${kube_flags[@]}
  kubectl delete configmap test-set-env-config ${kube_flags[@]}
  kubectl delete secret test-set-env-secret ${kube_flags[@]}

  ### Delete a deployment with initializer
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create a deployment.
  # NOTE: the original line was garbled — the flags splice was fused with a
  # duplicated redirect (`2>&1 ${kube_flags[@]}2>&1 "${kube_flags[@]}"`),
  # producing a malformed word; fixed to a single redirect + quoted splice.
  # `|| true` is deliberate: the request times out while the initializer holds
  # the object, and we only care that the object exists afterwards.
  kubectl create --request-timeout=1 -f hack/testdata/deployment-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true
  kube::test::get_object_assert 'deployment web' "{{$id_field}}" 'web'
  # Delete a deployment
  kubectl delete deployment web ${kube_flags[@]}
  # Check Deployment web doesn't exist
  setvar output_message = $(! kubectl get deployment web 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} '"web" not found'

  set +o nounset
  set +o errexit
}

proc run_rs_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:replicasets)"

  ### Create and stop a replica set, make sure it doesn't leak pods
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml ${kube_flags[@]}
  kube::log::status "Deleting rs"
  kubectl delete rs frontend ${kube_flags[@]}
  # Post-condition: no pods from frontend replica set
  kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml ${kube_flags[@]}
  kube::log::status "Deleting rs"
  kubectl delete rs frontend ${kube_flags[@]} --cascade=false
  # Wait for the rs to be deleted.
  kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Post-condition: All 3 pods still remain from frontend replica set
  kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
  # Cleanup
  kubectl delete pods -l "tier=frontend" ${kube_flags[@]}
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create replica set frontend from YAML
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml ${kube_flags[@]}
  # Post-condition: frontend replica set is created
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  # Describe command should print detailed information
  kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command should print events information by default
  kube::test::describe_object_events_assert rs 'frontend'
  # Describe command should not print events information when show-events=false
  kube::test::describe_object_events_assert rs 'frontend' false
  # Describe command should print events information when show-events=true
  kube::test::describe_object_events_assert rs 'frontend' true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command should print events information by default
  kube::test::describe_resource_events_assert rs
  # Describe command should not print events information when show-events=false
  kube::test::describe_resource_events_assert rs false
  # Describe command should print events information when show-events=true
  kube::test::describe_resource_events_assert rs true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By" "Controlled By"

  ### Scale replica set frontend with current-replicas and replicas
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
  # Command
  kubectl scale --current-replicas=3 --replicas=2 replicasets frontend ${kube_flags[@]}
  # Post-condition: 2 replicas
  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'

  # Set up three deploy, two deploy have same label
  kubectl create -f hack/testdata/scale-deploy-1.yaml ${kube_flags[@]}
  kubectl create -f hack/testdata/scale-deploy-2.yaml ${kube_flags[@]}
  kubectl create -f hack/testdata/scale-deploy-3.yaml ${kube_flags[@]}
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
  # Test kubectl scale --selector
  kubectl scale deploy --replicas=2 -l run=hello
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '2'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '2'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
  # Test kubectl scale --all
  kubectl scale deploy --replicas=3 --all
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '3'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
  # Clean-up
  kubectl delete rs frontend ${kube_flags[@]}
  kubectl delete deploy scale-1 scale-2 scale-3 ${kube_flags[@]}

  ### Expose replica set as service
  kubectl create -f hack/testdata/frontend-replicaset.yaml ${kube_flags[@]}
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
  # Command
  kubectl expose rs frontend --port=80 ${kube_flags[@]}
  # Post-condition: service exists and the port is unnamed
  kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
  # Create a service using service/v1 generator
  kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 ${kube_flags[@]}
  # Post-condition: service exists and the port is named default.
  kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
  # Cleanup services
  kubectl delete service frontend{,-2} ${kube_flags[@]}

  # Test set commands
  # Pre-condition: frontend replica set exists at generation 1
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1'
  kubectl set image rs/frontend ${kube_flags[@]} *=gcr.io/google-containers/pause:test-cmd
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2'
  kubectl set env rs/frontend ${kube_flags[@]} foo=bar
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3'
  kubectl set resources rs/frontend ${kube_flags[@]} --limits=cpu=200m,memory=512Mi
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '4'

  ### Delete replica set with id
  # Pre-condition: frontend replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  # Command
  kubectl delete rs frontend ${kube_flags[@]}
  # Post-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create two replica sets
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml ${kube_flags[@]}
  kubectl create -f hack/testdata/redis-slave-replicaset.yaml ${kube_flags[@]}
  # Post-condition: frontend and redis-slave
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'

  ### Delete multiple replica sets at once
  # Pre-condition: frontend and redis-slave
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
  # Command
  kubectl delete rs frontend redis-slave ${kube_flags[@]} # delete multiple replica sets at once
  # Post-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Delete a rs with initializer
  # Pre-condition: no rs exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create a rs
  kubectl create --request-timeout=1 -f hack/testdata/replicaset-with-initializer.yaml 2>&1 ${kube_flags[@]}2>&1 "${kube_flags[@]}" || true
  kube::test::get_object_assert 'rs nginx' "{{$id_field}}" 'nginx'
  # Delete a rs
  kubectl delete rs nginx ${kube_flags[@]}
  # check rs nginx doesn't exist
  setvar output_message = $(! kubectl get rs nginx 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string ${output_message} '"nginx" not found'

  if kube::test::if_supports_resource ${horizontalpodautoscalers}  {
    ### Auto scale replica set
    # Pre-condition: no replica set exists
    kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
    # Command
    kubectl create -f hack/testdata/frontend-replicaset.yaml ${kube_flags[@]}
    kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
    # autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
    kubectl autoscale -f hack/testdata/frontend-replicaset.yaml ${kube_flags[@]} --max=2 --cpu-percent=70
    kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
    kubectl delete hpa frontend ${kube_flags[@]}
    # autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
    kubectl autoscale rs frontend ${kube_flags[@]} --min=2 --max=3
    kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
    kubectl delete hpa frontend ${kube_flags[@]}
    # autoscale without specifying --max should fail
    ! kubectl autoscale rs frontend ${kube_flags[@]}
    # Clean up
    kubectl delete rs frontend ${kube_flags[@]}
  }

  set +o nounset
  set +o errexit
}

# Exercises `kubectl` against v1 DaemonSets: creates a rolling-update
# DaemonSet from hack/testdata, verifies that re-applying the identical
# manifest does NOT bump the template generation, and that each
# `kubectl set` mutation (image / env / resources) bumps it by exactly one.
# Relies on globals defined elsewhere in this file: $id_field,
# ${template_generation_field}, ${kube_flags[@]}, and the
# create_and_use_new_namespace / kube::log / kube::test helpers.
proc run_daemonset_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:daemonsets)"

  ### Create a rolling update DaemonSet
  # Pre-condition: no DaemonSet exists
  kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml ${kube_flags[@]}
  # Template Generation should be 1
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
  # Re-applying an unchanged manifest must be a no-op for the template.
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml ${kube_flags[@]}
  # Template Generation should stay 1
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
  # Test set commands: each mutation below must increment the generation.
  kubectl set image daemonsets/bind ${kube_flags[@]} *=gcr.io/google-containers/pause:test-cmd
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2'
  kubectl set env daemonsets/bind ${kube_flags[@]} foo=bar
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3'
  kubectl set resources daemonsets/bind ${kube_flags[@]} --limits=cpu=200m,memory=512Mi
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '4'

  # Clean up
  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml ${kube_flags[@]}

  set +o nounset
  set +o errexit
}

# Exercises `kubectl rollout undo` for DaemonSets backed by
# controllerrevisions: creates revision 1, updates to revision 2, then
# verifies no-op rollback to the current revision, dry-run rollback,
# real rollback to revision 1, failure on a nonexistent revision, and
# rollback-to-previous. Image assertions use the IMAGE_* globals
# (IMAGE_PAUSE_V2, IMAGE_DAEMONSET_R2, IMAGE_DAEMONSET_R2_2) defined
# elsewhere in this file.
proc run_daemonset_history_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:daemonsets, v1:controllerrevisions)"

  ### Test rolling back a DaemonSet
  # Pre-condition: no DaemonSet or its pods exists
  kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create a DaemonSet (revision 1)
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record ${kube_flags[@]}
  # --record stores the apply command in the revision's annotations.
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo daemonset --to-revision=1 ${kube_flags[@]}
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Update the DaemonSet (revision 2)
  kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record ${kube_flags[@]}
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo daemonset --dry-run=true ${kube_flags[@]}
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Rollback to revision 1
  kubectl rollout undo daemonset --to-revision=1 ${kube_flags[@]}
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to revision 1000000 - should fail
  setvar output_message = $(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
  kube::test::if_has_string ${output_message} "unable to find specified revision"
  # The failed rollback must leave the DaemonSet untouched.
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to last revision
  kubectl rollout undo daemonset ${kube_flags[@]}
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Clean up
  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml ${kube_flags[@]}

  set +o nounset
  set +o errexit
}

# Mirror of run_daemonset_history_tests for StatefulSets: creates
# revision 1, updates to revision 2, then verifies no-op rollback,
# dry-run rollback, real rollback, failure on a bogus revision number,
# and rollback-to-previous, asserting container images and counts at
# every step. Uses the IMAGE_STATEFULSET_R1/R2 and IMAGE_PAUSE_V2
# globals defined elsewhere in this file.
proc run_statefulset_history_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:statefulsets, v1:controllerrevisions)"

  ### Test rolling back a StatefulSet
  # Pre-condition: no statefulset or its pods exists
  kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create a StatefulSet (revision 1)
  kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record ${kube_flags[@]}
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo statefulset --to-revision=1 ${kube_flags[@]}
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Update the statefulset (revision 2)
  kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record ${kube_flags[@]}
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo statefulset --dry-run=true ${kube_flags[@]}
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Rollback to revision 1
  kubectl rollout undo statefulset --to-revision=1 ${kube_flags[@]}
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to revision 1000000 - should fail
  setvar output_message = $(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
  kube::test::if_has_string ${output_message} "unable to find specified revision"
  # The failed rollback must leave the StatefulSet untouched.
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to last revision
  kubectl rollout undo statefulset ${kube_flags[@]}
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Clean up - delete newest configuration
  kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml ${kube_flags[@]}
  # Post-condition: no pods from statefulset controller
  wait-for-pods-with-label "app=nginx-statefulset" ""

  set +o nounset
  set +o errexit
}

# Runs the full create / get / watch / describe / replace / edit / label /
# annotate / delete cycle over the mock service and replication controller
# defined in five multi-resource fixture formats (YAML separated by ---,
# JSON List, concatenated JSON objects, ReplicationControllerList,
# ServiceList), then repeats a create/delete cycle from a raw URL.
# Relies on globals defined elsewhere in this file: $id_field,
# ${labels_field}, ${annotations_field}, ${kube_flags[@]}, ${KUBE_TEMP},
# $SED, and the kube::test / kubectl-with-retry helpers.
proc run_multi_resources_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:multiple resources)"

  setvar FILES = ""hack/testdata/multi-resource-yaml
  hack/testdata/multi-resource-list
  hack/testdata/multi-resource-json
  hack/testdata/multi-resource-rclist
  hack/testdata/multi-resource-svclist""
  setvar YAML = "".yaml""
  setvar JSON = "".json""
  for file in $FILES {
    # Each fixture exists as either .yaml or .json; pick whichever is on disk
    # and derive the matching "-modify" replacement fixture.
    if test -f $file$YAML
    {
      setvar file = "$file$YAML"
      setvar replace_file = ""${file%.yaml}-modify.yaml""
    } else {
      setvar file = "$file$JSON"
      setvar replace_file = ""${file%.json}-modify.json""
    }

    # Which objects each fixture is expected to create: the rclist fixture
    # holds two RCs and no service; the svclist fixture two services and no RC.
    setvar has_svc = 'true'
    setvar has_rc = 'true'
    setvar two_rcs = 'false'
    setvar two_svcs = 'false'
    if [[ "${file}" == *rclist* ]] {
      setvar has_svc = 'false'
      setvar two_rcs = 'true'
    }
    if [[ "${file}" == *svclist* ]] {
      setvar has_rc = 'false'
      setvar two_svcs = 'true'
    }

    ### Create, get, describe, replace, label, annotate, and then delete service nginxsvc and replication controller my-nginx from 5 types of files:
    ### 1) YAML, separated by ---; 2) JSON, with a List type; 3) JSON, with JSON object concatenation
    ### 4) JSON, with a ReplicationControllerList type; 5) JSON, with a ServiceList type
    echo "Testing with file ${file} and replace with file ${replace_file}"
    # Pre-condition: no service (other than default kubernetes services) or replication controller exists
    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
    # Command
    kubectl create -f ${file} ${kube_flags[@]}
    # Post-condition: mock service (and mock2) exists
    if test $has_svc = true {
      if test $two_svcs = true {
        kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
      } else {
        kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
      }
    }
    # Post-condition: mock rc (and mock2) exists
    if test $has_rc = true {
      if test $two_rcs = true {
        kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
      } else {
        kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
      }
    }
    # Command
    kubectl get -f ${file} ${kube_flags[@]}
    # Command: watching multiple resources should return "not supported" error
    setvar WATCH_ERROR_FILE = ""${KUBE_TEMP}/kubectl-watch-error""
    kubectl get -f ${file} ${kube_flags[@]} "--watch" 2> ${WATCH_ERROR_FILE} || true
    if ! grep -q "watch is only supported on individual resources and resource collections" ${WATCH_ERROR_FILE} {
      kube::log::error_exit "kubectl watch multiple resource returns unexpected error or non-error: $(cat ${WATCH_ERROR_FILE})" "1"
    }
    kubectl describe -f ${file} ${kube_flags[@]}
    # Command
    kubectl replace -f $replace_file --force --cascade ${kube_flags[@]}
    # Post-condition: mock service (and mock2) and mock rc (and mock2) are replaced
    if test $has_svc = true {
      kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'replaced'
      if test $two_svcs = true {
        kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'replaced'
      }
    }
    if test $has_rc = true {
      kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'replaced'
      if test $two_rcs = true {
        kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'replaced'
      }
    }
    # Command: kubectl edit multiple resources
    # Generate a non-interactive "editor" that rewrites status labels in place.
    setvar temp_editor = ""${KUBE_TEMP}/tmp-editor.sh""
    echo -e "#!/bin/bash\n$SED -i \"s/status\:\ replaced/status\:\ edited/g\" \$@" > "${temp_editor}"
    # Fix: the original lines carried stray double-quotes
    # (`chmod +x ${temp_editor}"` / `EDITOR=${temp_editor}"`), which opened a
    # quoted string spanning the newline and fused the two commands into one
    # broken invocation. They are two separate commands.
    chmod +x ${temp_editor}
    EDITOR=${temp_editor} kubectl edit ${kube_flags[@]} -f ${file}
    # Post-condition: mock service (and mock2) and mock rc (and mock2) are edited
    if test $has_svc = true {
      kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'edited'
      if test $two_svcs = true {
        kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'edited'
      }
    }
    if test $has_rc = true {
      kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'edited'
      if test $two_rcs = true {
        kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'edited'
      }
    }
    # cleaning
    rm ${temp_editor}
    # Command
    # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
    # fails on some, but not all, of the resources, retries will fail because it tries to modify
    # existing labels.
    kubectl-with-retry label -f $file labeled=true --overwrite ${kube_flags[@]}
    # Post-condition: mock service and mock rc (and mock2) are labeled
    if test $has_svc = true {
      kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true'
      if test $two_svcs = true {
        kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true'
      }
    }
    if test $has_rc = true {
      kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true'
      if test $two_rcs = true {
        kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true'
      }
    }
    # Command
    # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
    # fails on some, but not all, of the resources, retries will fail because it tries to modify
    # existing annotations.
    kubectl-with-retry annotate -f $file annotated=true --overwrite ${kube_flags[@]}
    # Post-condition: mock service (and mock2) and mock rc (and mock2) are annotated
    if test $has_svc = true {
      kube::test::get_object_assert 'services mock' "{{${annotations_field}.annotated}}" 'true'
      if test $two_svcs = true {
        kube::test::get_object_assert 'services mock2' "{{${annotations_field}.annotated}}" 'true'
      }
    }
    if test $has_rc = true {
      kube::test::get_object_assert 'rc mock' "{{${annotations_field}.annotated}}" 'true'
      if test $two_rcs = true {
        kube::test::get_object_assert 'rc mock2' "{{${annotations_field}.annotated}}" 'true'
      }
    }
    # Cleanup resources created
    kubectl delete -f ${file} ${kube_flags[@]}
  }

  #############################
  # Multiple Resources via URL#
  #############################

  # Pre-condition: no service (other than default kubernetes services) or replication controller exists
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''

  # Command
  kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml ${kube_flags[@]}

  # Post-condition: service(mock) and rc(mock) exist
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'

  # Clean up
  kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml ${kube_flags[@]}

  # Post-condition: no service (other than default kubernetes services) or replication controller exists
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''

  set +o nounset
  set +o errexit
}

# Verifies `kubectl config set` round-trips certificate-authority-data:
# writes the apiserver cert once via --set-raw-bytes and once pre-encoded
# with base64, then asserts both paths stored the same value in kubeconfig.
proc run_kubectl_config_set_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:config set)"

  kubectl config set-cluster test-cluster --server="https://does-not-work"

  # Get the api cert and add a comment to avoid flag parsing problems
  setvar cert_data = $(echo "#Comment" && cat "${TMPDIR:-/tmp}/apiserver.crt")

  # NOTE(review): $cert_data is multi-line PEM but is passed unquoted here
  # and below; the original bash quoted it ("$cert_data") — confirm the shell
  # dialect preserves the value without word-splitting.
  kubectl config set clusters.test-cluster.certificate-authority-data $cert_data --set-raw-bytes
  setvar r_writen = $(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')

  setvar encoded = $(echo -n "$cert_data" | base64)
  kubectl config set clusters.test-cluster.certificate-authority-data $encoded
  setvar e_writen = $(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')

  # Both write paths must have persisted identical base64 data.
  test $e_writen == $r_writen

  set +o nounset
  set +o errexit
}

# Smoke-tests `kubectl proxy` via curl status codes: default prefix,
# default API exposure, and a custom --api-prefix (/custom). Uses the
# start-proxy / stop-proxy / check-curl-proxy-code helpers defined
# elsewhere in this file; ${metrics} and ${static} gate optional paths.
proc run_kubectl_local_proxy_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl local proxy"

  # Make sure the UI can be proxied
  start-proxy
  check-curl-proxy-code /ui 307
  check-curl-proxy-code /api/ui 404
  check-curl-proxy-code /api/v1/namespaces 200
  if kube::test::if_supports_resource ${metrics}  {
    check-curl-proxy-code /metrics 200
  }
  if kube::test::if_supports_resource ${static}  {
    check-curl-proxy-code /static/ 200
  }
  stop-proxy

  # Make sure the in-development api is accessible by default
  start-proxy
  check-curl-proxy-code /apis 200
  check-curl-proxy-code /apis/extensions/ 200
  stop-proxy

  # Custom paths let you see everything.
  start-proxy /custom
  check-curl-proxy-code /custom/ui 307
  if kube::test::if_supports_resource ${metrics}  {
    check-curl-proxy-code /custom/metrics 200
  }
  check-curl-proxy-code /custom/api/v1/namespaces 200
  stop-proxy

  set +o nounset
  set +o errexit
}

# Verifies the server's RESTMapper rejects an unknown resource type with a
# recognizable error message ("the server doesn't have a resource type").
# `|| true` keeps errexit from aborting on the expected kubectl failure;
# stderr is captured to a temp file for the grep check.
proc run_RESTMapper_evaluation_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing RESTMapper"

  setvar RESTMAPPER_ERROR_FILE = ""${KUBE_TEMP}/restmapper-error""

  ### Non-existent resource type should give a recognizeable error
  # Pre-condition: None
  # Command
  kubectl get ${kube_flags[@]} unknownresourcetype 2>${RESTMAPPER_ERROR_FILE} || true
  if grep -q "the server doesn't have a resource type" ${RESTMAPPER_ERROR_FILE} {
    kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat ${RESTMAPPER_ERROR_FILE})"
  } else {
    kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat ${RESTMAPPER_ERROR_FILE})"
    exit 1
  }
  rm ${RESTMAPPER_ERROR_FILE}
  # Post-condition: None

  set +o nounset
  set +o errexit
}

# Exercises RBAC cluster-scoped objects: checks the bootstrapped
# cluster-admin role/binding exist, then covers `kubectl create
# clusterrole` (verbs, resources, groups, resource names, non-resource
# URLs), `kubectl create clusterrolebinding` and `kubectl set subject`
# for users, groups, and service accounts, plus the namespaced
# rolebinding equivalents.
proc run_clusterroles_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing clusterroles"

  # make sure the server was properly bootstrapped with clusterroles and bindings
  kube::test::get_object_assert clusterroles/cluster-admin "{{.metadata.name}}" 'cluster-admin'
  kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'

  # test `kubectl create clusterrole`
  kubectl create ${kube_flags[@]} clusterrole pod-admin --verb=* --resource=pods
  kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
  kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
  kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
  kubectl create ${kube_flags[@]} clusterrole resource-reader --verb=get,list --resource=pods,deployments.extensions
  kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
  kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:deployments:'
  kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
  kubectl create ${kube_flags[@]} clusterrole resourcename-reader --verb=get,list --resource=pods --resource-name=foo
  kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
  kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
  kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
  kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
  kubectl create ${kube_flags[@]} clusterrole url-reader --verb=get --non-resource-url=/logs/* --non-resource-url=/healthz/*
  kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:'
  kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}" '/logs/\*:/healthz/\*:'

  # test `kubectl create clusterrolebinding`
  # test `kubectl set subject clusterrolebinding`
  kubectl create ${kube_flags[@]} clusterrolebinding super-admin --clusterrole=admin --user=super-admin
  kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
  kubectl set subject ${kube_flags[@]} clusterrolebinding super-admin --user=foo
  kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:foo:'
  kubectl create ${kube_flags[@]} clusterrolebinding multi-users --clusterrole=admin --user=user-1 --user=user-2
  kube::test::get_object_assert clusterrolebinding/multi-users "{{range.subjects}}{{.name}}:{{end}}" 'user-1:user-2:'

  kubectl create ${kube_flags[@]} clusterrolebinding super-group --clusterrole=admin --group=the-group
  kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
  kubectl set subject ${kube_flags[@]} clusterrolebinding super-group --group=foo
  kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'
  kubectl create ${kube_flags[@]} clusterrolebinding multi-groups --clusterrole=admin --group=group-1 --group=group-2
  kube::test::get_object_assert clusterrolebinding/multi-groups "{{range.subjects}}{{.name}}:{{end}}" 'group-1:group-2:'

  kubectl create ${kube_flags[@]} clusterrolebinding super-sa --clusterrole=admin --serviceaccount=otherns:sa-name
  kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
  kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
  kubectl set subject ${kube_flags[@]} clusterrolebinding super-sa --serviceaccount=otherfoo:foo
  kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
  kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'

  # test `kubectl create rolebinding`
  # test `kubectl set subject rolebinding`
  kubectl create ${kube_flags[@]} rolebinding admin --clusterrole=admin --user=default-admin
  kube::test::get_object_assert rolebinding/admin "{{.roleRef.kind}}" 'ClusterRole'
  kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:'
  kubectl set subject ${kube_flags[@]} rolebinding admin --user=foo
  kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:foo:'

  kubectl create ${kube_flags[@]} rolebinding localrole --role=localrole --group=the-group
  kube::test::get_object_assert rolebinding/localrole "{{.roleRef.kind}}" 'Role'
  kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
  kubectl set subject ${kube_flags[@]} rolebinding localrole --group=foo
  kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'

  kubectl create ${kube_flags[@]} rolebinding sarole --role=localrole --serviceaccount=otherns:sa-name
  kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
  kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
  kubectl set subject ${kube_flags[@]} rolebinding sarole --serviceaccount=otherfoo:foo
  kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
  kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'

  set +o nounset
  set +o errexit
}

# Exercises `kubectl create role` in the current namespace: resource-only,
# resource+group, subresource, group+subresource, resource-name-scoped,
# and multi-resource variants, asserting the generated rules each time.
# Also asserts the server rejects unknown resources/groups with a clear
# error (captured via `setvar output_message = $(! …)`).
proc run_role_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing role"

  # Create Role from command (only resource)
  kubectl create ${kube_flags[@]} role pod-admin --verb=* --resource=pods
  kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
  kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
  kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
  setvar output_message = $(! kubectl create "${kube_flags[@]}" role invalid-pod-admin --verb=* --resource=invalid-resource 2>&1)
  kube::test::if_has_string ${output_message} "the server doesn't have a resource type \"invalid-resource\""
  # Create Role from command (resource + group)
  kubectl create ${kube_flags[@]} role group-reader --verb=get,list --resource=deployments.extensions
  kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
  kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'deployments:'
  kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
  setvar output_message = $(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=deployments.invalid-group 2>&1)
  kube::test::if_has_string ${output_message} "the server doesn't have a resource type \"deployments\" in group \"invalid-group\""
  # Create Role from command (resource / subresource)
  kubectl create ${kube_flags[@]} role subresource-reader --verb=get,list --resource=pods/status
  kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
  kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:'
  kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
  # Create Role from command (resource + group / subresource)
  kubectl create ${kube_flags[@]} role group-subresource-reader --verb=get,list --resource=replicasets.extensions/scale
  kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
  kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'replicasets/scale:'
  kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
  setvar output_message = $(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=rs.invalid-group/scale 2>&1)
  kube::test::if_has_string ${output_message} "the server doesn't have a resource type \"rs\" in group \"invalid-group\""
  # Create Role from command (resource + resourcename)
  kubectl create ${kube_flags[@]} role resourcename-reader --verb=get,list --resource=pods --resource-name=foo
  kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
  kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
  kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
  kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
  # Create Role from command (multi-resources)
  kubectl create ${kube_flags[@]} role resource-reader --verb=get,list --resource=pods/status,deployments.extensions
  kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
  kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:deployments:'
  kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'

  set +o nounset
  set +o errexit
}

# Verifies that resource short names (e.g. "cm" for configmaps) are exported
# in the discovery document served at /api/v1.
# Enables nounset/errexit for the test body and restores them on exit.
proc run_assert_short_name_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing assert short name"

  kube::log::status "Testing propagation of short names for resources"
  # Raw discovery response; bypasses kubectl's resource printers.
  setvar output_message = $(kubectl get --raw=/api/v1)

  ## test if a short name is exported during discovery
  # The brackets are escaped (\[ \]), so the needle is presumably matched as a
  # regex by if_has_string — TODO confirm against its definition.
  kube::test::if_has_string ${output_message} '{"name":"configmaps","singularName":"","namespaced":true,"kind":"ConfigMap","verbs":\["create","delete","deletecollection","get","list","patch","update","watch"\],"shortNames":\["cm"\]}'

  set +o nounset
  set +o errexit
}

# Verifies that resource categories (the "all" category for pods) are
# propagated through the /api/v1 discovery document.
proc run_assert_categories_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing propagation of categories for resources"
  # grep -P (Perl regex) and -o (print only the match) are GNU grep
  # extensions; the non-greedy .*? isolates just the "pods" resource entry.
  setvar output_message = $(kubectl get --raw=/api/v1 | grep -Po '"name":"pods".*?}')
  kube::test::if_has_string ${output_message} '"categories":\["all"\]'

  set +o nounset
  set +o errexit
}

# Verifies kubectl error paths for `create`: passing no arguments must fail,
# and a template containing an empty string list must produce a validation
# error (not a panic). Exits non-zero if the expected error text is missing.
proc run_kubectl_create_error_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl create with error"

  # Passing no arguments to create is an error
  ! kubectl create

  ## kubectl create should not panic on empty string lists in a template
  setvar ERROR_FILE = ""${KUBE_TEMP}/validation-error""
  # Expected to fail; stderr is captured to ERROR_FILE and || true keeps
  # errexit from aborting the run.
  kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml ${kube_flags[@]} 2> "${ERROR_FILE}" || true
  # Post-condition: should get an error reporting the empty string
  if grep -q "unknown object type \"nil\" in ReplicationController" ${ERROR_FILE} {
    # Fix: dropped the stray leading \" that left an unbalanced literal quote
    # at the start of both log messages.
    kube::log::status "kubectl create with empty string list returns error as expected: $(cat ${ERROR_FILE})"
  } else {
    kube::log::status "kubectl create with empty string list returns unexpected error or non-error: $(cat ${ERROR_FILE})"
    exit 1
  }
  rm ${ERROR_FILE}

  set +o nounset
  set +o errexit
}

# Verifies --image validation in `kubectl run`: a valid image reference
# creates a deployment, an invalid reference is rejected with an error.
proc run_cmd_with_img_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing cmd with image"

  # Test that a valid image reference value is provided as the value of --image in `kubectl run <name> --image`
  setvar output_message = $(kubectl run test1 --image=validname)
  kube::test::if_has_string ${output_message} 'deployment "test1" created'
  # Clean up the deployment created above so later tests start clean.
  kubectl delete deployments test1
  # test invalid image name
  # $(! cmd 2>&1) captures the combined output of a command expected to fail.
  setvar output_message = $(! kubectl run test2 --image=InvalidImageName 2>&1)
  kube::test::if_has_string ${output_message} 'error: Invalid image name "InvalidImageName": invalid reference format'

  set +o nounset
  set +o errexit
}

# Verifies kubectl client-config error handling: missing kubeconfig files,
# nonexistent contexts/clusters/users, and a kubeconfig with an invalid
# apiVersion must each produce the expected error message.
proc run_client_config_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing client config"

  # Command
  # Pre-condition: kubeconfig "missing" is not a file or directory
  # $(! cmd 2>&1) captures the combined output of a command expected to fail.
  setvar output_message = $(! kubectl get pod --context="" --kubeconfig=missing 2>&1)
  kube::test::if_has_string ${output_message} "missing: no such file or directory"

  # Pre-condition: kubeconfig "missing" is not a file or directory
  # Command
  setvar output_message = $(! kubectl get pod --user="" --kubeconfig=missing 2>&1)
  # Post-condition: --user contains a valid / empty value, missing config file returns error
  kube::test::if_has_string ${output_message} "missing: no such file or directory"
  # Command
  setvar output_message = $(! kubectl get pod --cluster="" --kubeconfig=missing 2>&1)
  # Post-condition: --cluster contains a "valid" value, missing config file returns error
  kube::test::if_has_string ${output_message} "missing: no such file or directory"

  # Pre-condition: context "missing-context" does not exist
  # Command
  setvar output_message = $(! kubectl get pod --context="missing-context" 2>&1)
  kube::test::if_has_string ${output_message} 'context "missing-context" does not exist'
  # Post-condition: invalid or missing context returns error

  # Pre-condition: cluster "missing-cluster" does not exist
  # Command
  setvar output_message = $(! kubectl get pod --cluster="missing-cluster" 2>&1)
  kube::test::if_has_string ${output_message} 'cluster "missing-cluster" does not exist'
  # Post-condition: invalid or missing cluster returns error

  # Pre-condition: user "missing-user" does not exist
  # Command
  setvar output_message = $(! kubectl get pod --user="missing-user" 2>&1)
  kube::test::if_has_string ${output_message} 'auth info "missing-user" does not exist'
  # Post-condition: invalid or missing user returns error

  # test invalid config
  # Corrupt the apiVersion of the current config, then expect the freshly
  # built kubectl binary to refuse to load it.
  kubectl config view | sed -E "s/apiVersion: .*/apiVersion: v-1/g" > "${TMPDIR:-/tmp}"/newconfig.yaml
  setvar output_message = $(! "${KUBE_OUTPUT_HOSTBIN}/kubectl" get pods --context="" --user="" --kubeconfig="${TMPDIR:-/tmp}"/newconfig.yaml 2>&1)
  kube::test::if_has_string ${output_message} "Error loading config file"

  setvar output_message = $(! kubectl get pod --kubeconfig=missing-config 2>&1)
  kube::test::if_has_string ${output_message} 'no such file or directory'

  set +o nounset
  set +o errexit
}

# Verifies service account creation and deletion inside a dedicated
# namespace (test-service-accounts), cleaning up the namespace afterwards.
proc run_service_accounts_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing service accounts"

  ### Create a new namespace
  # Pre-condition: the test-service-accounts namespace does not exist
  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-service-accounts\" }}found{{end}}{{end}}:' ':'
  # Command
  kubectl create namespace test-service-accounts
  # Post-condition: namespace 'test-service-accounts' is created.
  kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts'

  ### Create a service account in a specific namespace
  # Command
  kubectl create serviceaccount test-service-account --namespace=test-service-accounts
  # Post-condition: secret exists and has expected values
  # NOTE(review): the resource name and --namespace flag are passed as one
  # quoted argument; presumably get_object_assert re-splits it before
  # invoking kubectl — confirm against its definition.
  kube::test::get_object_assert 'serviceaccount/test-service-account --namespace=test-service-accounts' "{{$id_field}}" 'test-service-account'
  # Clean-up
  kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
  # Clean up
  kubectl delete namespace test-service-accounts

  set +o nounset
  set +o errexit
}

# Verifies pod template lifecycle: create from a JSON fixture, print via
# `kubectl get`, and delete by name, asserting state before and after.
proc run_pod_templates_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing pod templates"

  ### Create PODTEMPLATE
  # Pre-condition: no PODTEMPLATE
  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/walkthrough/podtemplate.json ${kube_flags[@]}
  # Post-condition: nginx PODTEMPLATE is available
  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'

  ### Printing pod templates works
  kubectl get podtemplates ${kube_flags[@]}
  # Fails (via errexit) if "nginx" is absent from the YAML output.
  [[ "$(kubectl get podtemplates -o yaml "${kube_flags[@]}" | grep nginx)" ]]

  ### Delete nginx pod template by name
  # Pre-condition: nginx pod template is available
  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
  # Command
  kubectl delete podtemplate nginx ${kube_flags[@]}
  # Post-condition: No templates exist
  kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''

  set +o nounset
  set +o errexit
}

# Verifies statefulset create/scale/delete: scales nginx from 0 to 1 replica,
# waits for the pod, then deletes and confirms no pods leak.
proc run_stateful_set_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:statefulsets)"

  ### Create and stop statefulset, make sure it doesn't leak pods
  # Pre-condition: no statefulset exists
  kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: create statefulset
  kubectl create -f hack/testdata/rollingupdate-statefulset.yaml ${kube_flags[@]}

  ### Scale statefulset test with current-replicas and replicas
  # Pre-condition: 0 replicas
  kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0'
  # wait_object_assert retries until observedGeneration reaches 1.
  kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '1'
  # Command: Scale up
  kubectl scale --current-replicas=0 --replicas=1 statefulset nginx ${kube_flags[@]}
  # Post-condition: 1 replica, named nginx-0
  kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1'
  kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '2'
  # Typically we'd wait and confirm that N>1 replicas are up, but this framework
  # doesn't start the scheduler, so pet-0 will block all others.
  # TODO: test robust scaling in an e2e.
  wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"

  ### Clean up
  kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml ${kube_flags[@]}
  # Post-condition: no pods from statefulset controller
  wait-for-pods-with-label "app=nginx-statefulset" ""

  set +o nounset
  set +o errexit

}

# Verifies that a List manifest containing objects from multiple API
# versions can be created and then deleted by naming its members.
proc run_lists_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:lists)"

  ### Create a List with objects from multiple versions
  # Command
  kubectl create -f hack/testdata/list.yaml ${kube_flags[@]}

  ### Delete the List with objects from multiple versions
  # Command
  kubectl delete service/list-service-test deployment/list-deployment-test

  set +o nounset
  set +o errexit
}

# Verifies persistent volume lifecycle: creates and deletes three PV
# fixtures (two local, one GCE) one at a time, asserting state each step.
proc run_persistent_volumes_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing persistent volumes"

  ### Create and delete persistent volume examples
  # Pre-condition: no persistent volumes currently exist
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-01.yaml ${kube_flags[@]}
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
  kubectl delete pv pv0001 ${kube_flags[@]}
  kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-02.yaml ${kube_flags[@]}
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
  kubectl delete pv pv0002 ${kube_flags[@]}
  kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/gce.yaml ${kube_flags[@]}
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
  kubectl delete pv pv0003 ${kube_flags[@]}
  # Post-condition: no PVs
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''

  set +o nounset
  set +o errexit
}

# Verifies persistent volume claim lifecycle: creates and deletes three PVC
# fixtures (two YAML, one JSON) one at a time, asserting state each step.
proc run_persistent_volume_claims_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing persistent volumes claims"

  ### Create and delete persistent volume claim examples
  # Pre-condition: no persistent volume claims currently exist
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-01.yaml ${kube_flags[@]}
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
  kubectl delete pvc myclaim-1 ${kube_flags[@]}

  kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-02.yaml ${kube_flags[@]}
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:'
  kubectl delete pvc myclaim-2 ${kube_flags[@]}

  kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-03.json ${kube_flags[@]}
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:'
  kubectl delete pvc myclaim-3 ${kube_flags[@]}
  # Post-condition: no PVCs
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''

  set +o nounset
  set +o errexit
}

# Verifies storage class lifecycle: creates one from inline JSON fed via a
# here-string, checks it is visible under both "storageclass" and the "sc"
# alias, then deletes it.
proc run_storage_class_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing storage class"

  ### Create and delete storage class
  # Pre-condition: no storage classes currently exist
  kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # The """ … """ multiline string is piped to `create -f -` via the <<<
  # here-string; the JSON body must stay byte-identical.
  kubectl create -f - ${kube_flags[@]} <<< """
{
  "kind": "StorageClass",
  "apiVersion": "storage.k8s.io/v1",
  "metadata": {
  "name": "storage-class-name"
  },
  "provisioner": "kubernetes.io/fake-provisioner-type",
  "parameters": {
  "zone":"us-east-1b",
  "type":"ssd"
  }
}
"""
  kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
  # The "sc" short name must resolve to the same object.
  kube::test::get_object_assert sc "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
  kubectl delete storageclass storage-class-name ${kube_flags[@]}
  # Post-condition: no storage classes
  kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''

  set +o nounset
  set +o errexit

}

# Verifies node operations: describe output (with/without events), patching
# spec.unschedulable on and back off, and that TokenReview objects can be
# created against the webhook token authentication endpoint.
proc run_nodes_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl(v1:nodes)"

  kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'

  kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
  # Describe command should print events information by default
  kube::test::describe_object_events_assert nodes "127.0.0.1"
  # Describe command should not print events information when show-events=false
  kube::test::describe_object_events_assert nodes "127.0.0.1" false
  # Describe command should print events information when show-events=true
  kube::test::describe_object_events_assert nodes "127.0.0.1" true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
  # Describe command should print events information by default
  kube::test::describe_resource_events_assert nodes
  # Describe command should not print events information when show-events=false
  kube::test::describe_resource_events_assert nodes false
  # Describe command should print events information when show-events=true
  kube::test::describe_resource_events_assert nodes true

  ### kubectl patch update can mark node unschedulable
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  kubectl patch ${kube_flags[@]} nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}'
  # Post-condition: node is unschedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
  # Patching with null clears the field entirely.
  kubectl patch ${kube_flags[@]} nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}'
  # Post-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  # check webhook token authentication endpoint, kubectl doesn't actually display the returned object so this isn't super useful
  # but it proves that works
  kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1beta1.json --validate=false
  kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1.json --validate=false

  set +o nounset
  set +o errexit
}

# Verifies the authorization endpoints: creates SubjectAccessReview objects
# via kubectl (v1 and v1beta1), then POSTs the same fixtures directly with
# curl and asserts the response contains '"allowed": true'.
proc run_authorization_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing authorization"

  # check remote authorization endpoint, kubectl doesn't actually display the returned object so this isn't super useful
  # but it proves that works
  kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1.json --validate=false
  kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json --validate=false

  setvar SAR_RESULT_FILE = ""${KUBE_TEMP}/sar-result.json""
  # -d @file posts the fixture body; the response is saved for inspection.
  curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1beta1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json > "${SAR_RESULT_FILE}"
  if grep -q '"allowed": true' ${SAR_RESULT_FILE} {
    kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
  } else {
    kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
    exit 1
  }
  rm ${SAR_RESULT_FILE}

  # Repeat for the v1 (stable) API group.
  setvar SAR_RESULT_FILE = ""${KUBE_TEMP}/sar-result.json""
  curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1.json > "${SAR_RESULT_FILE}"
  if grep -q '"allowed": true' ${SAR_RESULT_FILE} {
    kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
  } else {
    kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
    exit 1
  }
  rm ${SAR_RESULT_FILE}

  set +o nounset
  set +o errexit
}

# Verifies that `kubectl get` can retrieve multiple heterogeneous resources
# (a node and a service) in a single invocation.
proc run_retrieve_multiple_tests {
  set -o nounset
  set -o errexit

  # switch back to the default namespace
  kubectl config set-context ${CONTEXT} --namespace=""
  kube::log::status "Testing kubectl(v1:multiget)"
  kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:'

  set +o nounset
  set +o errexit
}

# Verifies the "all" resource alias: creates a cassandra controller and
# service, then asserts a label-filtered `get all` finds them.
proc run_resource_aliasing_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing resource aliasing"
  kubectl create -f examples/storage/cassandra/cassandra-controller.yaml ${kube_flags[@]}
  kubectl create -f examples/storage/cassandra/cassandra-service.yaml ${kube_flags[@]}

  # NOTE(review): $object packs the resource ("all") and label selector into
  # one string; whether it is re-split into two arguments below depends on
  # the shell's word-splitting behavior for unquoted vars — confirm.
  setvar object = ""all -l'app=cassandra'""
  setvar request = ""{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}""

  # all 4 cassandra's might not be in the request immediately...
  # Accept 4, 3, or 2 matches since pods may still be coming up.
  kube::test::get_object_assert $object $request 'cassandra:cassandra:cassandra:cassandra:' || \
  kube::test::get_object_assert $object $request 'cassandra:cassandra:cassandra:' || \
  kube::test::get_object_assert $object $request 'cassandra:cassandra:'

  kubectl delete all -l app=cassandra ${kube_flags[@]}

  set +o nounset
  set +o errexit
}

# Verifies `kubectl explain` works for a full resource name, its short name,
# and a dotted field path. Failures surface via errexit.
proc run_kubectl_explain_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl(v1:explain)"
  kubectl explain pods
  # shortcuts work
  kubectl explain po
  kubectl explain po.status.message

  set +o nounset
  set +o errexit
}

# Verifies the swagger API schema endpoint: downloads /swaggerapi/api/v1 and
# asserts it contains several expected description strings.
proc run_swagger_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing swagger"

  # Verify schema
  setvar file = ""${KUBE_TEMP}/schema-v1.json""
  curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/v1" > "${file}"
  # Each [[ "$(grep …)" ]] fails (via errexit) when the phrase is absent.
  [[ "$(grep "list of returned" "${file}")" ]]
  [[ "$(grep "List of services" "${file}")" ]]
  [[ "$(grep "Watch for changes to the described resources" "${file}")" ]]

  set +o nounset
  set +o errexit
}

# Verifies `kubectl get --sort-by`: it must not panic on an empty pod list,
# must list an existing pod, and must order three pods correctly when
# sorting by metadata.name and by metadata.labels.name.
proc run_kubectl_sort_by_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl --sort-by"

  ### sort-by should not panic if no pod exists
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl get pods --sort-by="{metadata.name}"
  kubectl get pods --sort-by="{metadata.creationTimestamp}"

  ### sort-by should works if pod exists
  # Create POD
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create ${kube_flags[@]} -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  # Post-condition: valid-pod is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Check output of sort-by
  setvar output_message = $(kubectl get pods --sort-by="{metadata.name}")
  kube::test::if_has_string ${output_message} "valid-pod"
  ### Clean up
  # Pre-condition: valid-pod exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete ${kube_flags[@]} pod valid-pod --grace-period=0 --force
  # Post-condition: valid-pod doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### sort-by should works by sorting by name
  # Create three PODs
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create ${kube_flags[@]} -f hack/testdata/sorted-pods/sorted-pod1.yaml
  # Post-condition: sorted-pod1 is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:'
  # Command
  kubectl create ${kube_flags[@]} -f hack/testdata/sorted-pods/sorted-pod2.yaml
  # Post-condition: sorted-pod1 is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:'
  # Command
  kubectl create ${kube_flags[@]} -f hack/testdata/sorted-pods/sorted-pod3.yaml
  # Post-condition: sorted-pod1 is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'

  # Check output of sort-by '{metadata.name}'
  setvar output_message = $(kubectl get pods --sort-by="{metadata.name}")
  kube::test::if_sort_by_has_correct_order ${output_message} "sorted-pod1:sorted-pod2:sorted-pod3:"

  # Check output of sort-by '{metadata.labels.name}'
  # The expected label-sorted order is the reverse of the name-sorted order,
  # per the fixtures' label values.
  setvar output_message = $(kubectl get pods --sort-by="{metadata.labels.name}")
  kube::test::if_sort_by_has_correct_order ${output_message} "sorted-pod3:sorted-pod2:sorted-pod1:"

  ### Clean up
  # Pre-condition: valid-pod exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'
  # Command
  kubectl delete ${kube_flags[@]} pod --grace-period=0 --force --all
  # Post-condition: valid-pod doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  set +o nounset
  set +o errexit
}

# Verifies --all-namespaces: the flag must win over an explicit --namespace,
# exercised by creating and then deleting a single pod.
proc run_kubectl_all_namespace_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl --all-namespace"

  # Pre-condition: the "default" namespace exists
  kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \\\"default\\\"}}{{$id_field}}:{{end}}{{end}}" 'default:'

  ### Create POD
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create ${kube_flags[@]} -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  # Post-condition: valid-pod is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Verify a specific namespace is ignored when all-namespaces is provided
  # Command
  kubectl get pods --all-namespaces --namespace=default

  ### Clean up
  # Pre-condition: valid-pod exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete ${kube_flags[@]} pod valid-pod --grace-period=0 --force
  # Post-condition: valid-pod doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  set +o nounset
  set +o errexit
}

# Verifies `kubectl certificate approve` and `deny`, each invoked both by CSR
# name and by -f manifest; the CSR is recreated and deleted for each case.
proc run_certificates_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing certificates"

  # approve
  # Case 1: approve by name.
  kubectl create -f hack/testdata/csr.yml ${kube_flags[@]}
  kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
  kubectl certificate approve foo ${kube_flags[@]}
  kubectl get csr ${kube_flags[@]} -o json
  kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
  kubectl delete -f hack/testdata/csr.yml ${kube_flags[@]}
  kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''

  # Case 2: approve by manifest file.
  kubectl create -f hack/testdata/csr.yml ${kube_flags[@]}
  kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
  kubectl certificate approve -f hack/testdata/csr.yml ${kube_flags[@]}
  kubectl get csr ${kube_flags[@]} -o json
  kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
  kubectl delete -f hack/testdata/csr.yml ${kube_flags[@]}
  kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''

  # deny
  # Case 3: deny by name.
  kubectl create -f hack/testdata/csr.yml ${kube_flags[@]}
  kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
  kubectl certificate deny foo ${kube_flags[@]}
  kubectl get csr ${kube_flags[@]} -o json
  kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
  kubectl delete -f hack/testdata/csr.yml ${kube_flags[@]}
  kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''

  # Case 4: deny by manifest file.
  kubectl create -f hack/testdata/csr.yml ${kube_flags[@]}
  kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
  kubectl certificate deny -f hack/testdata/csr.yml ${kube_flags[@]}
  kubectl get csr ${kube_flags[@]} -o json
  kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
  kubectl delete -f hack/testdata/csr.yml ${kube_flags[@]}
  kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''

  set +o nounset
  set +o errexit
}

# Verifies cluster-management commands (cordon/uncordon/drain): --dry-run
# must not mutate the node, conflicting arguments must fail, and an empty
# --selector= must select and cordon all nodes.
proc run_cluster_management_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing cluster-management commands"

  kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'

  ### kubectl cordon update with --dry-run does not mark node unschedulable
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  kubectl cordon "127.0.0.1" --dry-run
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### kubectl drain update with --dry-run does not mark node unschedulable
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  kubectl drain "127.0.0.1" --dry-run
  # Post-condition: node still exists, node is still schedulable
  kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### kubectl uncordon update with --dry-run is a no-op
  # Pre-condition: node is already schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  setvar response = $(kubectl uncordon "127.0.0.1" --dry-run)
  kube::test::if_has_string ${response} 'already uncordoned'
  # Post-condition: node is still schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### kubectl drain command fails when both --selector and a node argument are given
  # Pre-condition: node exists and contains label test=label
  kubectl label node "127.0.0.1" "test=label"
  kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
  # $(! cmd 2>&1) captures the combined output of a command expected to fail.
  setvar response = $(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
  kube::test::if_has_string ${response} 'cannot specify both a node name'

  ### kubectl cordon command fails when no arguments are passed
  # Pre-condition: node exists
  setvar response = $(! kubectl cordon 2>&1)
  kube::test::if_has_string ${response} 'error\: USAGE\: cordon NODE'

  ### kubectl cordon selects all nodes with an empty --selector=
  # Pre-condition: node "127.0.0.1" is uncordoned
  kubectl uncordon "127.0.0.1"
  setvar response = $(kubectl cordon --selector=)
  kube::test::if_has_string ${response} 'node "127.0.0.1" cordoned'
  # Post-condition: node "127.0.0.1" is cordoned
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'

  set +o nounset
  set +o errexit
}

proc run_plugins_tests {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl plugins"

  # top-level plugin command
  setvar output_message = $(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl -h 2>&1)
  kube::test::if_has_string ${output_message} 'plugin\s\+Runs a command-line plugin'

  # no plugins
  setvar output_message = $(! kubectl plugin 2>&1)
  kube::test::if_has_string ${output_message} 'no plugins installed'

  # single plugins path
  setvar output_message = $(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin 2>&1)
  kube::test::if_has_string ${output_message} 'echo\s\+Echoes for test-cmd'
  kube::test::if_has_string ${output_message} 'get\s\+The wonderful new plugin-based get!'
  kube::test::if_has_string ${output_message} 'error\s\+The tremendous plugin that always fails!'
  kube::test::if_has_not_string ${output_message} 'The hello plugin'
  kube::test::if_has_not_string ${output_message} 'Incomplete plugin'
  kube::test::if_has_not_string ${output_message} 'no plugins installed'

  # multiple plugins path
  setvar output_message = $(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin -h 2>&1)
  kube::test::if_has_string ${output_message} 'echo\s\+Echoes for test-cmd'
  kube::test::if_has_string ${output_message} 'get\s\+The wonderful new plugin-based get!'
  kube::test::if_has_string ${output_message} 'error\s\+The tremendous plugin that always fails!'
  kube::test::if_has_string ${output_message} 'hello\s\+The hello plugin'
  kube::test::if_has_not_string ${output_message} 'Incomplete plugin'

  # don't override existing commands
  setvar output_message = $(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl get -h 2>&1)
  kube::test::if_has_string ${output_message} 'Display one or many resources'
  kube::test::if_has_not_string "$output_message{output_message}" 'The wonderful new plugin-based get'

  # plugin help
  setvar output_message = $(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin hello -h 2>&1)
  kube::test::if_has_string ${output_message} 'The hello plugin is a new plugin used by test-cmd to test multiple plugin locations.'
  kube::test::if_has_string ${output_message} 'Usage:'

  # run plugin
  setvar output_message = $(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin hello 2>&1)
  kube::test::if_has_string ${output_message} '#hello#'
  setvar output_message = $(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin echo 2>&1)
  kube::test::if_has_string ${output_message} 'This plugin works!'
  setvar output_message = $(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/ kubectl plugin hello 2>&1)
  kube::test::if_has_string ${output_message} 'unknown command'
  setvar output_message = $(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/ kubectl plugin error 2>&1)
  kube::test::if_has_string ${output_message} 'error: exit status 1'

  # plugin tree
  setvar output_message = $(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree 2>&1)
  kube::test::if_has_string ${output_message} 'Plugin with a tree of commands'
  kube::test::if_has_string ${output_message} 'child1\s\+The first child of a tree'
  kube::test::if_has_string ${output_message} 'child2\s\+The second child of a tree'
  kube::test::if_has_string ${output_message} 'child3\s\+The third child of a tree'
  setvar output_message = $(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree child1 --help 2>&1)
  kube::test::if_has_string ${output_message} 'The first child of a tree'
  kube::test::if_has_not_string ${output_message} 'The second child'
  kube::test::if_has_not_string ${output_message} 'child2'
  setvar output_message = $(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree child1 2>&1)
  kube::test::if_has_string ${output_message} 'child one'
  kube::test::if_has_not_string ${output_message} 'child1'
  kube::test::if_has_not_string ${output_message} 'The first child'

  # plugin env
  setvar output_message = $(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin env -h 2>&1)
  kube::test::if_has_string ${output_message} "This is a flag 1"
  kube::test::if_has_string ${output_message} "This is a flag 2"
  kube::test::if_has_string ${output_message} "This is a flag 3"
  setvar output_message = $(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin env --test1=value1 -t value2 2>&1)
  kube::test::if_has_string ${output_message} 'KUBECTL_PLUGINS_CURRENT_NAMESPACE'
  kube::test::if_has_string ${output_message} 'KUBECTL_PLUGINS_CALLER'
  kube::test::if_has_string ${output_message} 'KUBECTL_PLUGINS_DESCRIPTOR_COMMAND=./env.sh'
  kube::test::if_has_string ${output_message} 'KUBECTL_PLUGINS_DESCRIPTOR_SHORT_DESC=The plugin envs plugin'
  kube::test::if_has_string ${output_message} 'KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG'
  kube::test::if_has_string ${output_message} 'KUBECTL_PLUGINS_GLOBAL_FLAG_REQUEST_TIMEOUT=0'
  kube::test::if_has_string ${output_message} 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST1=value1'
  kube::test::if_has_string ${output_message} 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST2=value2'
  kube::test::if_has_string ${output_message} 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST3=default'

  set +o nounset
  set +o errexit
}

proc run_impersonation_tests {
  # Exercises kubectl impersonation (--as / --as-group) against the secure port.
  # Uses kube_flags_with_token (token auth) so the server can attribute the
  # impersonated identity; requires CSR resource support for the positive cases.
  set -o nounset
  set -o errexit

  kube::log::status "Testing impersonation"

  # Negative case: --as-group without --as must be rejected by the client/server.
  # The leading `!` inverts the exit status so errexit tolerates the failure.
  setvar output_message = $(! kubectl get pods "${kube_flags_with_token[@]}" --as-group=foo 2>&1)
  kube::test::if_has_string ${output_message} 'without impersonating a user'

  if kube::test::if_supports_resource ${csr}  {
    # --as: the created CSR should record the impersonated username and the
    # server-assigned group system:authenticated.
    kubectl create -f hack/testdata/csr.yml ${kube_flags_with_token[@]} --as=user1
    kube::test::get_object_assert 'csr/foo' '{{.spec.username}}' 'user1'
    kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}}{{end}}' 'system:authenticated'
    kubectl delete -f hack/testdata/csr.yml ${kube_flags_with_token[@]}

    # --as-group: multiple groups are preserved in order; ",,,chameleon" checks
    # that commas inside a single --as-group value are not split into groups.
    kubectl create -f hack/testdata/csr.yml ${kube_flags_with_token[@]} --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon
    kube::test::get_object_assert 'csr/foo' '{{len .spec.groups}}' '3'
    kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}} {{end}}' 'group2 group1 ,,,chameleon '
    kubectl delete -f hack/testdata/csr.yml ${kube_flags_with_token[@]}
  }

  # Restore the caller's shell options.
  set +o nounset
  set +o errexit
}
# Runs all kubectl tests.
# Requires an env var SUPPORTED_RESOURCES which is a comma separated list of
# resources for which tests should be run.
proc runTests {
  # Top-level test driver: validates the environment, defines shared globals
  # (kube_flags, go-template field paths), then dispatches each test suite via
  # record_command, gated on whether SUPPORTED_RESOURCES includes the resource
  # the suite needs. Exits 1 at the end if any recorded command failed.
  #
  # NOTE(review): the value below renders as ""False"" — double double-quotes
  # look like an artifact of the bash->Oil translation; upstream bash assigns
  # the plain string "False". Verify against the translator's output rules.
  setvar foundError = ""False""

  # SUPPORTED_RESOURCES is the caller's contract: a comma-separated resource
  # list (or "*" for everything) consulted by kube::test::if_supports_resource.
  if test -z ${SUPPORTED_RESOURCES:-} {
    echo "Need to set SUPPORTED_RESOURCES env var. It is a list of resources that are supported and hence should be tested. Set it to (*) to test all resources"
    exit 1
  }
  kube::log::status "Checking kubectl version"
  kubectl version

  # use timestamp as the name of namespace because increasing the variable inside subshell
  # does not affect the value of the variable outside the subshell.
  proc create_and_use_new_namespace {
    setvar namespace_number = $(date +%s%N)
    kube::log::status "Creating namespace namespace${namespace_number}"
    kubectl create namespace "namespace${namespace_number}"
    kubectl config set-context ${CONTEXT} --namespace="namespace${namespace_number}"
  }

  # Flags shared by every kubectl invocation below: insecure port by default,
  # token-authenticated secure port for the impersonation/authz suites.
  setvar kube_flags = ''(
    -s "http://127.0.0.1:${API_PORT}"
  )

  # token defined in hack/testdata/auth-tokens.csv
  setvar kube_flags_with_token = ''(
    -s "https://127.0.0.1:${SECURE_API_PORT}" --token=admin-token --insecure-skip-tls-verify=true
  )

  # NOTE(review): these assignments REPLACE the arrays above, discarding the
  # server address and token flags. Upstream bash test-cmd.sh appends with
  # `kube_flags+=("--match-server-version")` — this looks like a dropped `+=`
  # in translation; confirm intended Oil append semantics before relying on it.
  if [[ -z "${ALLOW_SKEW:-}" ]] {
    setvar kube_flags = ''("--match-server-version")
    setvar kube_flags_with_token = ''("--match-server-version")
  }
  if kube::test::if_supports_resource ${nodes}  {
    test $(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}") == "v1"
  }

  # Go-template field paths shared by the get_object_assert checks in every
  # suite below. Kept as plain strings; each suite interpolates them into
  # "{{...}}" templates.
  setvar id_field = "".metadata.name""
  setvar labels_field = "".metadata.labels""
  setvar annotations_field = "".metadata.annotations""
  setvar service_selector_field = "".spec.selector""
  setvar rc_replicas_field = "".spec.replicas""
  setvar rc_status_replicas_field = "".status.replicas""
  setvar rc_container_image_field = "".spec.template.spec.containers""
  setvar rs_replicas_field = "".spec.replicas""
  setvar port_field = ""(index .spec.ports 0).port""
  setvar port_name = ""(index .spec.ports 0).name""
  setvar second_port_field = ""(index .spec.ports 1).port""
  setvar second_port_name = ""(index .spec.ports 1).name""
  setvar image_field = ""(index .spec.containers 0).image""
  setvar pod_container_name_field = ""(index .spec.containers 0).name""
  setvar container_name_field = ""(index .spec.template.spec.containers 0).name""
  setvar hpa_min_field = "".spec.minReplicas""
  setvar hpa_max_field = "".spec.maxReplicas""
  setvar hpa_cpu_field = "".spec.targetCPUUtilizationPercentage""
  setvar statefulset_replicas_field = "".spec.replicas""
  setvar statefulset_observed_generation = "".status.observedGeneration""
  setvar job_parallelism_field = "".spec.parallelism""
  setvar deployment_replicas = "".spec.replicas""
  setvar secret_data = "".data""
  setvar secret_type = "".type""
  setvar change_cause_annotation = ''.*kubernetes.io/change-cause.*''
  setvar pdb_min_available = "".spec.minAvailable""
  setvar pdb_max_unavailable = "".spec.maxUnavailable""
  setvar generation_field = "".metadata.generation""
  setvar template_generation_field = "".spec.templateGeneration""
  setvar container_len = ""(len .spec.template.spec.containers)""
  setvar image_field0 = ""(index .spec.template.spec.containers 0).image""
  setvar image_field1 = ""(index .spec.template.spec.containers 1).image""

  # Make sure "default" namespace exists.
  if kube::test::if_supports_resource ${namespaces}  {
    setvar output_message = $(kubectl get "${kube_flags[@]}" namespaces)
    if [[ ! $(echo "${output_message}" | grep "default") ]] {
      # Create default namespace
      kubectl create ${kube_flags[@]} ns default
    }
  }

  # Make sure "kubernetes" service exists.
  if kube::test::if_supports_resource ${services}  {
    # Attempt to create the kubernetes service, tolerating failure (since it might already exist)
    kubectl create ${kube_flags[@]} -f hack/testdata/kubernetes-service.yaml || true
    # Require the service to exist (either we created it or the API server did)
    kubectl get ${kube_flags[@]} -f hack/testdata/kubernetes-service.yaml
  }

  #########################
  # Kubectl version #
  #########################

  record_command run_kubectl_version_tests

  #######################
  # kubectl config set #
  #######################

  record_command run_kubectl_config_set_tests

  #######################
  # kubectl local proxy #
  #######################

  record_command run_kubectl_local_proxy_tests

  #########################
  # RESTMapper evaluation #
  #########################

  record_command run_RESTMapper_evaluation_tests

  ################
  # Cluster Role #
  ################

  if kube::test::if_supports_resource ${clusterroles}  {
    record_command run_clusterroles_tests
  }

  ########
  # Role #
  ########
  if kube::test::if_supports_resource ${roles}  {
      record_command run_role_tests
  }

  #########################
  # Assert short name     #
  #########################

  record_command run_assert_short_name_tests

  #########################
  # Assert categories     #
  #########################

  ## test if a category is exported during discovery
  if kube::test::if_supports_resource ${pods}  {
    record_command run_assert_categories_tests
  }

  ###########################
  # POD creation / deletion #
  ###########################

  if kube::test::if_supports_resource ${pods}  {
    record_command run_pod_tests
  }

  if kube::test::if_supports_resource ${pods}  {
    record_command run_save_config_tests
  }

  if kube::test::if_supports_resource ${pods}  {
    record_command run_kubectl_create_error_tests
  }

  if kube::test::if_supports_resource ${pods}  {
    # TODO: Move apply tests to run on rs instead of pods so that they can be
    # run for federation apiserver as well.
    record_command run_kubectl_apply_tests
    record_command run_kubectl_run_tests
    record_command run_kubectl_create_filter_tests
  }

  if kube::test::if_supports_resource ${deployments}  {
    record_command run_kubectl_apply_deployments_tests
  }

  ###############
  # Kubectl get #
  ###############

  if kube::test::if_supports_resource ${pods}  {
    # TODO: Move get tests to run on rs instead of pods so that they can be
    # run for federation apiserver as well.
    record_command run_kubectl_get_tests
  }

  ##################
  # Global timeout #
  ##################

  if kube::test::if_supports_resource ${pods}  {
    # TODO: Move request timeout tests to run on rs instead of pods so that they
    # can be run for federation apiserver as well.
    record_command run_kubectl_request_timeout_tests
  }

  #####################################
  # CustomResourceDefinitions         #
  #####################################

  # customresourcedefinitions cleanup after themselves.
  if kube::test::if_supports_resource ${customresourcedefinitions}  {
    record_command run_crd_tests
  }

  #################
  # Run cmd w img #
  #################

  if kube::test::if_supports_resource ${deployments}  {
    record_command run_cmd_with_img_tests
  }


  #####################################
  # Recursive Resources via directory #
  #####################################

  if kube::test::if_supports_resource ${pods}  {
    record_command run_recursive_resources_tests
  }


  ##############
  # Namespaces #
  ##############
  if kube::test::if_supports_resource ${namespaces}  {
    record_command run_namespace_tests
  }


  ###########
  # Secrets #
  ###########
  if kube::test::if_supports_resource ${namespaces}  {
    if kube::test::if_supports_resource ${secrets}  {
      record_command run_secrets_test
    }
  }


  ######################
  # ConfigMap          #
  ######################

  if kube::test::if_supports_resource ${namespaces} {
    if kube::test::if_supports_resource ${configmaps}  {
      record_command run_configmap_tests
    }
  }

  ####################
  # Client Config    #
  ####################

  record_command run_client_config_tests

  ####################
  # Service Accounts #
  ####################

  if kube::test::if_supports_resource ${namespaces} && kube::test::if_supports_resource ${serviceaccounts}  {
    record_command run_service_accounts_tests
  }

  #################
  # Pod templates #
  #################

  if kube::test::if_supports_resource ${podtemplates}  {
    record_command run_pod_templates_tests
  }

  ############
  # Services #
  ############

  if kube::test::if_supports_resource ${services}  {
    record_command run_service_tests
  }

  ##################
  # DaemonSets     #
  ##################

  if kube::test::if_supports_resource ${daemonsets}  {
    record_command run_daemonset_tests
    if kube::test::if_supports_resource ${controllerrevisions} {
      record_command run_daemonset_history_tests
    }
  }

  ###########################
  # Replication controllers #
  ###########################

  if kube::test::if_supports_resource ${namespaces}  {
    if kube::test::if_supports_resource ${replicationcontrollers}  {
      record_command run_rc_tests
    }
  }

  ######################
  # Deployments       #
  ######################

  if kube::test::if_supports_resource ${deployments}  {
    record_command run_deployment_tests
  }

  ######################
  # Replica Sets       #
  ######################

  if kube::test::if_supports_resource ${replicasets}  {
    record_command run_rs_tests
  }

  #################
  # Stateful Sets #
  #################

  if kube::test::if_supports_resource ${statefulsets}  {
    record_command run_stateful_set_tests
    if kube::test::if_supports_resource ${controllerrevisions} {
      record_command run_statefulset_history_tests
    }
  }

  ######################
  # Lists              #
  ######################

  if kube::test::if_supports_resource ${services}  {
    if kube::test::if_supports_resource ${deployments}  {
      record_command run_lists_tests
    }
  }


  ######################
  # Multiple Resources #
  ######################
  if kube::test::if_supports_resource ${services}  {
    if kube::test::if_supports_resource ${replicationcontrollers}  {
      record_command run_multi_resources_tests
    }
  }

  ######################
  # Persistent Volumes #
  ######################

  if kube::test::if_supports_resource ${persistentvolumes}  {
    record_command run_persistent_volumes_tests
  }

  ############################
  # Persistent Volume Claims #
  ############################

  if kube::test::if_supports_resource ${persistentvolumeclaims}  {
    record_command run_persistent_volume_claims_tests
  }

  ############################
  # Storage Classes #
  ############################

  if kube::test::if_supports_resource ${storageclass}  {
    record_command run_storage_class_tests
  }

  #########
  # Nodes #
  #########

  if kube::test::if_supports_resource ${nodes}  {
    record_command run_nodes_tests
  }


  ########################
  # authorization.k8s.io #
  ########################

  if kube::test::if_supports_resource ${subjectaccessreviews}  {
    record_command run_authorization_tests
  }

  # kubectl auth can-i
  # kube-apiserver is started with authorization mode AlwaysAllow, so kubectl can-i always returns yes
  if kube::test::if_supports_resource ${subjectaccessreviews}  {
    setvar output_message = $(kubectl auth can-i '*' '*' 2>&1 "${kube_flags[@]}")
    kube::test::if_has_string ${output_message} "yes"

    setvar output_message = $(kubectl auth can-i get pods --subresource=log 2>&1 "${kube_flags[@]}")
    kube::test::if_has_string ${output_message} "yes"

    setvar output_message = $(kubectl auth can-i get invalid_resource 2>&1 "${kube_flags[@]}")
    kube::test::if_has_string ${output_message} "the server doesn't have a resource type"

    setvar output_message = $(kubectl auth can-i get /logs/ 2>&1 "${kube_flags[@]}")
    kube::test::if_has_string ${output_message} "yes"

    # Mixing a NonResourceURL with --subresource is a usage error; `!` inverts
    # the expected failure so errexit does not abort the suite.
    setvar output_message = $(! kubectl auth can-i get /logs/ --subresource=log 2>&1 "${kube_flags[@]}")
    kube::test::if_has_string ${output_message} "subresource can not be used with NonResourceURL"

    # --quiet suppresses all output; only the exit code carries the answer.
    setvar output_message = $(kubectl auth can-i list jobs.batch/bar -n foo --quiet 2>&1 "${kube_flags[@]}")
    kube::test::if_empty_string ${output_message}
  }

  # kubectl auth reconcile
  if kube::test::if_supports_resource ${clusterroles}  {
    kubectl auth reconcile ${kube_flags[@]} -f test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml
    kube::test::get_object_assert 'rolebindings -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-RB:'
    kube::test::get_object_assert 'roles -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-R:'
    kube::test::get_object_assert 'clusterrolebindings -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CRB:'
    kube::test::get_object_assert 'clusterroles -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CR:'

    kubectl delete ${kube_flags[@]} rolebindings,role,clusterroles,clusterrolebindings -n some-other-random -l test-cmd=auth
  }

  #####################
  # Retrieve multiple #
  #####################

  if kube::test::if_supports_resource ${nodes}  {
    if kube::test::if_supports_resource ${services}  {
      record_command run_retrieve_multiple_tests
    }
  }


  #####################
  # Resource aliasing #
  #####################

  if kube::test::if_supports_resource ${services}  {
    if kube::test::if_supports_resource ${replicationcontrollers}  {
      record_command run_resource_aliasing_tests
    }
  }

  ###########
  # Explain #
  ###########

  if kube::test::if_supports_resource ${pods}  {
    record_command run_kubectl_explain_tests
  }


  ###########
  # Swagger #
  ###########

  record_command run_swagger_tests

  #####################
  # Kubectl --sort-by #
  #####################

  if kube::test::if_supports_resource ${pods}  {
    record_command run_kubectl_sort_by_tests
  }

  ############################
  # Kubectl --all-namespaces #
  ############################

  if kube::test::if_supports_resource ${pods}  {
    record_command run_kubectl_all_namespace_tests
  }

  ################
  # Certificates #
  ################

  if kube::test::if_supports_resource ${csr}  {
    record_command run_certificates_tests
  }

  ######################
  # Cluster Management #
  ######################
  if kube::test::if_supports_resource ${nodes}  {
    record_command run_cluster_management_tests
  }

  ###########
  # Plugins #
  ###########

  record_command run_plugins_tests

  #################
  # Impersonation #
  #################
  record_command run_impersonation_tests

  kube::test::clear_all

  # record_command is expected to flip foundError to "True" on any suite
  # failure; propagate that as a non-zero exit for CI.
  if test $foundError == "True" {
    echo "TEST FAILED"
    exit 1
  }
}

proc run_initializer_tests {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing --include-uninitialized"

  ### Create a deployment
  kubectl create --request-timeout=1 -f hack/testdata/initializer-deployments.yaml 2>&1 ${kube_flags[@]}2>&1 "${kube_flags[@]}" || true

  ### Test kubectl get --include-uninitialized
  # Command
  setvar output_message = $(kubectl get deployments 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "No resources found" should be part of the output
  kube::test::if_has_string ${output_message} 'No resources found'
  # Command
  setvar output_message = $(kubectl get deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "No resources found" should be part of the output
  kube::test::if_has_string ${output_message} 'No resources found'
  # Command
  setvar output_message = $(kubectl get deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
  # Post-condition: I assume "web" is the deployment name
  kube::test::if_has_string ${output_message} 'web'
  # Command
  setvar output_message = $(kubectl get deployments web 2>&1 "${kube_flags[@]}")
  # Post-condition: I assume "web" is the deployment name
  kube::test::if_has_string ${output_message} 'web'
  # Command
  setvar output_message = $(kubectl get deployments --show-all 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "No resources found" should be part of the output
  kube::test::if_has_string ${output_message} 'No resources found'

  ### Test kubectl describe --include-uninitialized
  # Command
  setvar output_message = $(kubectl describe deployments 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "run=web" should be part of the output
  kube::test::if_has_string ${output_message} 'run=web'
  # Command
  setvar output_message = $(kubectl describe deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "run=web" should be part of the output
  kube::test::if_has_string ${output_message} 'run=web'
  # Command
  setvar output_message = $(kubectl describe deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl describe deployments web --include-uninitialized 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "run=web" should be part of the output
  kube::test::if_has_string ${output_message} 'run=web'
  # Command
  setvar output_message = $(kubectl describe deployments web --include-uninitialized=false 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "run=web" should be part of the output
  kube::test::if_has_string ${output_message} 'run=web'

  ### Test kubectl label --include-uninitialized
  # Command
  setvar output_message = $(kubectl label deployments labelkey1=labelvalue1 --all 2>&1 "${kube_flags[@]}")
  # Post-condition: web is labelled
  kube::test::if_has_string ${output_message} 'deployment "web" labeled'
  kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey1}}" 'labelvalue1'
  # Command
  setvar output_message = $(kubectl label deployments labelkey2=labelvalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl label deployments labelkey3=labelvalue3 -l run=web 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl label deployments labelkey4=labelvalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
  # Post-condition: web is labelled
  kube::test::if_has_string ${output_message} 'deployment "web" labeled'
  kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey4}}" 'labelvalue4'
  # Command
  setvar output_message = $(kubectl label deployments labelkey5=labelvalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl label deployments labelkey6=labelvalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
  # Post-condition: web is labelled
  kube::test::if_has_string ${output_message} 'deployment "web" labeled'
  kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey6}}" 'labelvalue6'
  # Command
  setvar output_message = $(kubectl label deployments web labelkey7=labelvalue7 2>&1 "${kube_flags[@]}")
  # Post-condition: web is labelled
  kube::test::if_has_string ${output_message} 'deployment "web" labeled'
  kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey7}}" 'labelvalue7'
  # Found All Labels
  kube::test::get_object_assert 'deployments web' "{{${labels_field}}}" 'map[labelkey1:labelvalue1 labelkey4:labelvalue4 labelkey6:labelvalue6 labelkey7:labelvalue7 run:web]'

  ### Test kubectl annotate --include-uninitialized
  # Command
  setvar output_message = $(kubectl annotate deployments annotatekey1=annotatevalue1 --all 2>&1 "${kube_flags[@]}")
  # Post-condition: DEPLOYMENT has annotation
  kube::test::if_has_string ${output_message} 'deployment "web" annotated'
  kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey1}}" 'annotatevalue1'
  # Command
  setvar output_message = $(kubectl annotate deployments annotatekey2=annotatevalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl annotate deployments annotatekey3=annotatevalue3 -l run=web 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl annotate deployments annotatekey4=annotatevalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
  # Post-condition: DEPLOYMENT has annotation
  kube::test::if_has_string ${output_message} 'deployment "web" annotated'
  kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey4}}" 'annotatevalue4'
  # Command
  setvar output_message = $(kubectl annotate deployments annotatekey5=annotatevalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl annotate deployments annotatekey6=annotatevalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
  # Post-condition: DEPLOYMENT has annotation
  kube::test::if_has_string ${output_message} 'deployment "web" annotated'
  kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey6}}" 'annotatevalue6'
  # Command
  setvar output_message = $(kubectl annotate deployments web annotatekey7=annotatevalue7 2>&1 "${kube_flags[@]}")
  # Post-condition: web DEPLOYMENT has annotation
  kube::test::if_has_string ${output_message} 'deployment "web" annotated'
  kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey7}}" 'annotatevalue7'

  ### Test kubectl edit --include-uninitialized
  test $(EDITOR=cat kubectl edit deployments 2>&1 "${kube_flags[@]}" | grep 'edit cancelled, no objects found')
  test $(EDITOR=cat kubectl edit deployments --include-uninitialized 2>&1 "${kube_flags[@]}" | grep 'Edit cancelled, no changes made.')

  ### Test kubectl set image --include-uninitialized
  # Command
  setvar output_message = $(kubectl set image deployments *=nginx:1.11 --all 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "image updated" should be part of the output
  kube::test::if_has_string ${output_message} 'image updated'
  # Command
  setvar output_message = $(kubectl set image deployments *=nginx:1.11 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl set image deployments *=nginx:1.11 -l run=web 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl set image deployments *=nginx:1.12 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "image updated" should be part of the output
  kube::test::if_has_string ${output_message} 'image updated'
  # Command
  setvar output_message = $(kubectl set image deployments *=nginx:1.13 -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "image updated" should be part of the output
  kube::test::if_has_string ${output_message} 'image updated'

  ### Test kubectl set resources --include-uninitialized
  # Command
  setvar output_message = $(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "resource requirements updated" should be part of the output
  kube::test::if_has_string ${output_message} 'resource requirements updated'
  # Command
  setvar output_message = $(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi -l run=web 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=200m,memory=256Mi -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "resource requirements updated" should be part of the output
  kube::test::if_has_string ${output_message} 'resource requirements updated'
  # Command
  setvar output_message = $(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=512Mi -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "resource requirements updated" should be part of the output
  kube::test::if_has_string ${output_message} 'resource requirements updated'

  ### Test kubectl set selector --include-uninitialized
  # Create a service with initializer
  kubectl create --request-timeout=1 -f hack/testdata/initializer-redis-master-service.yaml 2>&1 ${kube_flags[@]}2>&1 "${kube_flags[@]}" || true
  # Command
  setvar output_message = $(kubectl set selector services role=padawan --all 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "selector updated" should be part of the output
  kube::test::if_has_string ${output_message} 'selector updated'
  # Command
  setvar output_message = $(kubectl set selector services role=padawan --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}

  ### Test kubectl set subject --include-uninitialized
  # Create a create clusterrolebinding with initializer
  kubectl create --request-timeout=1 -f hack/testdata/initializer-clusterrolebinding.yaml 2>&1 ${kube_flags[@]}2>&1 "${kube_flags[@]}" || true
  kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
  # Command
  setvar output_message = $(kubectl set subject clusterrolebinding --user=foo --all 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "subjects updated" should be part of the output
  kube::test::if_has_string ${output_message} 'subjects updated'
  # Command
  setvar output_message = $(kubectl set subject clusterrolebinding --user=foo --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}
  # Command
  setvar output_message = $(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "subjects updated" should be part of the output
  kube::test::if_has_string ${output_message} 'subjects updated'
  # Command
  setvar output_message = $(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized --all 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "subjects updated" should be part of the output
  kube::test::if_has_string ${output_message} 'subjects updated'

  ### Test kubectl set serviceaccount --include-uninitialized
  # Command
  setvar output_message = $(kubectl set serviceaccount deployment serviceaccount1 --all 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "serviceaccount updated" should be part of the output
  kube::test::if_has_string ${output_message} 'serviceaccount updated'
  # Command
  setvar output_message = $(kubectl set serviceaccount deployment serviceaccount1 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
  # Post-condition: The output should be empty
  kube::test::if_empty_string ${output_message}

  ### Test kubectl delete --include-uninitialized
  kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
  # Command
  setvar output_message = $(kubectl delete clusterrolebinding --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "No resources found" should be part of the output
  kube::test::if_has_string ${output_message} 'No resources found'
  # Command
  setvar output_message = $(kubectl delete clusterrolebinding --all 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "deleted" should be part of the output
  kube::test::if_has_string ${output_message} 'deleted'
  kube::test::get_object_assert clusterrolebinding/super-admin "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Test kubectl apply --include-uninitialized
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply pod a
  kubectl apply --prune --request-timeout=20 --include-uninitialized=false --all -f hack/testdata/prune/a.yaml ${kube_flags[@]} 2>&1
  # check right pod exists
  kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
  # Post-condition: Other uninitialized resources should not be pruned
  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" 'web'
  kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
  # cleanup
  kubectl delete pod a
  # apply pod a and prune uninitialized deployments web
  kubectl apply --prune --request-timeout=20 --all -f hack/testdata/prune/a.yaml ${kube_flags[@]} 2>&1
  # check right pod exists
  kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
  # Post-condition: Other uninitialized resources should not be pruned
  kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
  kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
  # cleanup
  kubectl delete pod a
  # apply pod a and prune uninitialized deployments web
  kubectl apply --prune --request-timeout=20 --include-uninitialized --all -f hack/testdata/prune/a.yaml ${kube_flags[@]} 2>&1
  # check right pod exists
  kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
  # Post-condition: Other uninitialized resources should not be pruned
  kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
  kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
  # cleanup
  kubectl delete pod a
  kubectl delete --request-timeout=1 deploy web
  kubectl delete --request-timeout=1 service redis-master

  set +o nounset
  set +o errexit
}
    (DONE benchmarks/testdata/test-cmd-util.sh)
#!/bin/sh
#
# Copyright (c) 2007 Shawn Pearce
#

# Oil/OSH translation of git's t9300 fast-import test script.
# NOTE(review): the doubled quotes (''...'') look like an artifact of the
# sh-to-Oil translation; the intended value is the inner text — TODO confirm.
setvar test_description = ''test git fast-import utility''
source ./test-lib.sh
source "$TEST_DIRECTORY"/diff-lib.sh ;# test-lib chdir's into trash

proc verify_packs {
	for p in .git/objects/pack/*.pack
	{
		git verify-pack @ARGV $p || return
	}
}

# Fixture blobs used throughout the series below.  The contents are chosen
# to stress the "data <<DELIM" heredoc parser: several deliberately contain
# the literal delimiter text ("EOF"/"END") inside the payload.
setvar file2_data = ''file2
second line of EOF''

# Payload begins with "EOF" and the closing delimiter used is "END".
setvar file3_data = ''EOF
in 3rd file
 END''

# Exact-length ("data <count>") form: length must match the payload.
setvar file4_data = 'abcd'
setvar file4_len = '4'

setvar file5_data = ''an inline file.
  we should see it later.''

# Executable payload; checked in later with mode 755.
setvar file6_data = ''#!/bin/sh
echo "$@"''

###
### series A
###

test_expect_success 'empty stream succeeds' '
	git config fastimport.unpackLimit 0 &&
	git fast-import </dev/null
'

test_expect_success 'truncated stream complains' '
	echo "tag foo" | test_must_fail git fast-import
'

test_expect_success 'A: create pack from stdin' '
	test_tick &&
	cat >input <<-INPUT_END &&
	blob
	mark :2
	data <<EOF
	$file2_data
	EOF

	blob
	mark :3
	data <<END
	$file3_data
	END

	blob
	mark :4
	data $file4_len
	$file4_data
	commit refs/heads/master
	mark :5
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	initial
	COMMIT

	M 644 :2 file2
	M 644 :3 file3
	M 755 :4 file4

	tag series-A
	from :5
	data <<EOF
	An annotated tag without a tagger
	EOF

	tag series-A-blob
	from :3
	data <<EOF
	An annotated tag that annotates a blob.
	EOF

	INPUT_END
	git fast-import --export-marks=marks.out <input &&
	git whatchanged master
'

test_expect_success 'A: verify pack' '
	verify_packs
'

test_expect_success 'A: verify commit' '
	cat >expect <<-EOF &&
	author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE

	initial
	EOF
	git cat-file commit master | sed 1d >actual &&
	test_cmp expect actual
'

test_expect_success 'A: verify tree' '
	cat >expect <<-EOF &&
	100644 blob file2
	100644 blob file3
	100755 blob file4
	EOF
	git cat-file -p master^{tree} | sed "s/ [0-9a-f]*	/ /" >actual &&
	test_cmp expect actual
'

test_expect_success 'A: verify file2' '
	echo "$file2_data" >expect &&
	git cat-file blob master:file2 >actual &&
	test_cmp expect actual
'

test_expect_success 'A: verify file3' '
	echo "$file3_data" >expect &&
	git cat-file blob master:file3 >actual &&
	test_cmp expect actual
'

test_expect_success 'A: verify file4' '
	printf "$file4_data" >expect &&
	git cat-file blob master:file4 >actual &&
	test_cmp expect actual
'

test_expect_success 'A: verify tag/series-A' '
	cat >expect <<-EOF &&
	object $(git rev-parse refs/heads/master)
	type commit
	tag series-A

	An annotated tag without a tagger
	EOF
	git cat-file tag tags/series-A >actual &&
	test_cmp expect actual
'

test_expect_success 'A: verify tag/series-A-blob' '
	cat >expect <<-EOF &&
	object $(git rev-parse refs/heads/master:file3)
	type blob
	tag series-A-blob

	An annotated tag that annotates a blob.
	EOF
	git cat-file tag tags/series-A-blob >actual &&
	test_cmp expect actual
'

test_expect_success 'A: verify marks output' '
	cat >expect <<-EOF &&
	:2 $(git rev-parse --verify master:file2)
	:3 $(git rev-parse --verify master:file3)
	:4 $(git rev-parse --verify master:file4)
	:5 $(git rev-parse --verify master^0)
	EOF
	test_cmp expect marks.out
'

test_expect_success 'A: verify marks import' '
	git fast-import \
		--import-marks=marks.out \
		--export-marks=marks.new \
		</dev/null &&
	test_cmp expect marks.new
'

test_expect_success 'A: tag blob by sha1' '
	test_tick &&
	new_blob=$(echo testing | git hash-object --stdin) &&
	cat >input <<-INPUT_END &&
	tag series-A-blob-2
	from $(git rev-parse refs/heads/master:file3)
	data <<EOF
	Tag blob by sha1.
	EOF

	blob
	mark :6
	data <<EOF
	testing
	EOF

	commit refs/heads/new_blob
	committer  <> 0 +0000
	data 0
	M 644 :6 new_blob
	#pretend we got sha1 from fast-import
	ls "new_blob"

	tag series-A-blob-3
	from $new_blob
	data <<EOF
	Tag new_blob.
	EOF
	INPUT_END

	cat >expect <<-EOF &&
	object $(git rev-parse refs/heads/master:file3)
	type blob
	tag series-A-blob-2

	Tag blob by sha1.
	object $new_blob
	type blob
	tag series-A-blob-3

	Tag new_blob.
	EOF

	git fast-import <input &&
	git cat-file tag tags/series-A-blob-2 >actual &&
	git cat-file tag tags/series-A-blob-3 >>actual &&
	test_cmp expect actual
'

test_expect_success 'A: verify marks import does not crash' '
	test_tick &&
	cat >input <<-INPUT_END &&
	commit refs/heads/verify--import-marks
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	recreate from :5
	COMMIT

	from :5
	M 755 :2 copy-of-file2

	INPUT_END

	git fast-import --import-marks=marks.out <input &&
	git whatchanged verify--import-marks
'

test_expect_success 'A: verify pack' '
	verify_packs
'

test_expect_success 'A: verify diff' '
	cat >expect <<-EOF &&
	:000000 100755 0000000000000000000000000000000000000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 A	copy-of-file2
	EOF
	git diff-tree -M -r master verify--import-marks >actual &&
	compare_diff_raw expect actual &&
	test $(git rev-parse --verify master:file2) \
	    = $(git rev-parse --verify verify--import-marks:copy-of-file2)
'

test_expect_success 'A: export marks with large values' '
	test_tick &&
	mt=$(git hash-object --stdin < /dev/null) &&
	>input.blob &&
	>marks.exp &&
	>tree.exp &&

	cat >input.commit <<-EOF &&
	commit refs/heads/verify--dump-marks
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	test the sparse array dumping routines with exponentially growing marks
	COMMIT
	EOF

	i=0 l=4 m=6 n=7 &&
	while test "$i" -lt 27
	do
		cat >>input.blob <<-EOF &&
		blob
		mark :$l
		data 0
		blob
		mark :$m
		data 0
		blob
		mark :$n
		data 0
		EOF
		echo "M 100644 :$l l$i" >>input.commit &&
		echo "M 100644 :$m m$i" >>input.commit &&
		echo "M 100644 :$n n$i" >>input.commit &&

		echo ":$l $mt" >>marks.exp &&
		echo ":$m $mt" >>marks.exp &&
		echo ":$n $mt" >>marks.exp &&

		printf "100644 blob $mt\tl$i\n" >>tree.exp &&
		printf "100644 blob $mt\tm$i\n" >>tree.exp &&
		printf "100644 blob $mt\tn$i\n" >>tree.exp &&

		l=$(($l + $l)) &&
		m=$(($m + $m)) &&
		n=$(($l + $n)) &&

		i=$((1 + $i)) || return 1
	done &&

	sort tree.exp > tree.exp_s &&

	cat input.blob input.commit | git fast-import --export-marks=marks.large &&
	git ls-tree refs/heads/verify--dump-marks >tree.out &&
	test_cmp tree.exp_s tree.out &&
	test_cmp marks.exp marks.large
'

###
### series B
###

test_expect_success 'B: fail on invalid blob sha1' '
	test_tick &&
	cat >input <<-INPUT_END &&
	commit refs/heads/branch
	mark :1
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	corrupt
	COMMIT

	from refs/heads/master
	M 755 0000000000000000000000000000000000000001 zero1

	INPUT_END

	test_when_finished "rm -f .git/objects/pack_* .git/objects/index_*" &&
	test_must_fail git fast-import <input
'

test_expect_success 'B: accept branch name "TEMP_TAG"' '
	cat >input <<-INPUT_END &&
	commit TEMP_TAG
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	tag base
	COMMIT

	from refs/heads/master

	INPUT_END

	test_when_finished "rm -f .git/TEMP_TAG
		git gc
		git prune" &&
	git fast-import <input &&
	test -f .git/TEMP_TAG &&
	test $(git rev-parse master) = $(git rev-parse TEMP_TAG^)
'

test_expect_success 'B: accept empty committer' '
	cat >input <<-INPUT_END &&
	commit refs/heads/empty-committer-1
	committer  <> $GIT_COMMITTER_DATE
	data <<COMMIT
	empty commit
	COMMIT
	INPUT_END

	test_when_finished "git update-ref -d refs/heads/empty-committer-1
		git gc
		git prune" &&
	git fast-import <input &&
	out=$(git fsck) &&
	echo "$out" &&
	test -z "$out"
'

test_expect_success 'B: accept and fixup committer with no name' '
	cat >input <<-INPUT_END &&
	commit refs/heads/empty-committer-2
	committer <a@b.com> $GIT_COMMITTER_DATE
	data <<COMMIT
	empty commit
	COMMIT
	INPUT_END

	test_when_finished "git update-ref -d refs/heads/empty-committer-2
		git gc
		git prune" &&
	git fast-import <input &&
	out=$(git fsck) &&
	echo "$out" &&
	test -z "$out"
'

test_expect_success 'B: fail on invalid committer (1)' '
	cat >input <<-INPUT_END &&
	commit refs/heads/invalid-committer
	committer Name email> $GIT_COMMITTER_DATE
	data <<COMMIT
	empty commit
	COMMIT
	INPUT_END

	test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
	test_must_fail git fast-import <input
'

test_expect_success 'B: fail on invalid committer (2)' '
	cat >input <<-INPUT_END &&
	commit refs/heads/invalid-committer
	committer Name <e<mail> $GIT_COMMITTER_DATE
	data <<COMMIT
	empty commit
	COMMIT
	INPUT_END

	test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
	test_must_fail git fast-import <input
'

test_expect_success 'B: fail on invalid committer (3)' '
	cat >input <<-INPUT_END &&
	commit refs/heads/invalid-committer
	committer Name <email>> $GIT_COMMITTER_DATE
	data <<COMMIT
	empty commit
	COMMIT
	INPUT_END

	test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
	test_must_fail git fast-import <input
'

test_expect_success 'B: fail on invalid committer (4)' '
	cat >input <<-INPUT_END &&
	commit refs/heads/invalid-committer
	committer Name <email $GIT_COMMITTER_DATE
	data <<COMMIT
	empty commit
	COMMIT
	INPUT_END

	test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
	test_must_fail git fast-import <input
'

test_expect_success 'B: fail on invalid committer (5)' '
	cat >input <<-INPUT_END &&
	commit refs/heads/invalid-committer
	committer Name<email> $GIT_COMMITTER_DATE
	data <<COMMIT
	empty commit
	COMMIT
	INPUT_END

	test_when_finished "git update-ref -d refs/heads/invalid-committer" &&
	test_must_fail git fast-import <input
'

###
### series C
###

test_expect_success 'C: incremental import create pack from stdin' '
	newf=$(echo hi newf | git hash-object -w --stdin) &&
	oldf=$(git rev-parse --verify master:file2) &&
	test_tick &&
	cat >input <<-INPUT_END &&
	commit refs/heads/branch
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	second
	COMMIT

	from refs/heads/master
	M 644 $oldf file2/oldf
	M 755 $newf file2/newf
	D file3

	INPUT_END

	git fast-import <input &&
	git whatchanged branch
'

test_expect_success 'C: verify pack' '
	verify_packs
'

test_expect_success 'C: validate reuse existing blob' '
	test $newf = $(git rev-parse --verify branch:file2/newf) &&
	test $oldf = $(git rev-parse --verify branch:file2/oldf)
'

test_expect_success 'C: verify commit' '
	cat >expect <<-EOF &&
	parent $(git rev-parse --verify master^0)
	author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE

	second
	EOF

	git cat-file commit branch | sed 1d >actual &&
	test_cmp expect actual
'

test_expect_success 'C: validate rename result' '
	cat >expect <<-EOF &&
	:000000 100755 0000000000000000000000000000000000000000 f1fb5da718392694d0076d677d6d0e364c79b0bc A	file2/newf
	:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 R100	file2	file2/oldf
	:100644 000000 0d92e9f3374ae2947c23aa477cbc68ce598135f1 0000000000000000000000000000000000000000 D	file3
	EOF
	git diff-tree -M -r master branch >actual &&
	compare_diff_raw expect actual
'

###
### series D
###

test_expect_success 'D: inline data in commit' '
	test_tick &&
	cat >input <<-INPUT_END &&
	commit refs/heads/branch
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	third
	COMMIT

	from refs/heads/branch^0
	M 644 inline newdir/interesting
	data <<EOF
	$file5_data
	EOF

	M 755 inline newdir/exec.sh
	data <<EOF
	$file6_data
	EOF

	INPUT_END

	git fast-import <input &&
	git whatchanged branch
'

test_expect_success 'D: verify pack' '
	verify_packs
'

test_expect_success 'D: validate new files added' '
	cat >expect <<-EOF &&
	:000000 100755 0000000000000000000000000000000000000000 e74b7d465e52746be2b4bae983670711e6e66657 A	newdir/exec.sh
	:000000 100644 0000000000000000000000000000000000000000 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 A	newdir/interesting
	EOF
	git diff-tree -M -r branch^ branch >actual &&
	compare_diff_raw expect actual
'

test_expect_success 'D: verify file5' '
	echo "$file5_data" >expect &&
	git cat-file blob branch:newdir/interesting >actual &&
	test_cmp expect actual
'

test_expect_success 'D: verify file6' '
	echo "$file6_data" >expect &&
	git cat-file blob branch:newdir/exec.sh >actual &&
	test_cmp expect actual
'

###
### series E
###

test_expect_success 'E: rfc2822 date, --date-format=raw' '
	cat >input <<-INPUT_END &&
	commit refs/heads/branch
	author $GIT_AUTHOR_NAME <$GIT_AUTHOR_EMAIL> Tue Feb 6 11:22:18 2007 -0500
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> Tue Feb 6 12:35:02 2007 -0500
	data <<COMMIT
	RFC 2822 type date
	COMMIT

	from refs/heads/branch^0

	INPUT_END

	test_must_fail git fast-import --date-format=raw <input
'
test_expect_success 'E: rfc2822 date, --date-format=rfc2822' '
	git fast-import --date-format=rfc2822 <input
'

test_expect_success 'E: verify pack' '
	verify_packs
'

test_expect_success 'E: verify commit' '
	cat >expect <<-EOF &&
	author $GIT_AUTHOR_NAME <$GIT_AUTHOR_EMAIL> 1170778938 -0500
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1170783302 -0500

	RFC 2822 type date
	EOF
	git cat-file commit branch | sed 1,2d >actual &&
	test_cmp expect actual
'

###
### series F
###

test_expect_success 'F: non-fast-forward update skips' '
	old_branch=$(git rev-parse --verify branch^0) &&
	test_tick &&
	cat >input <<-INPUT_END &&
	commit refs/heads/branch
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	losing things already?
	COMMIT

	from refs/heads/branch~1

	reset refs/heads/other
	from refs/heads/branch

	INPUT_END

	test_must_fail git fast-import <input &&
	# branch must remain unaffected
	test $old_branch = $(git rev-parse --verify branch^0)
'

test_expect_success 'F: verify pack' '
	verify_packs
'

test_expect_success 'F: verify other commit' '
	cat >expect <<-EOF &&
	tree $(git rev-parse branch~1^{tree})
	parent $(git rev-parse branch~1)
	author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE

	losing things already?
	EOF
	git cat-file commit other >actual &&
	test_cmp expect actual
'

###
### series G
###

test_expect_success 'G: non-fast-forward update forced' '
	old_branch=$(git rev-parse --verify branch^0) &&
	test_tick &&
	cat >input <<-INPUT_END &&
	commit refs/heads/branch
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	losing things already?
	COMMIT

	from refs/heads/branch~1

	INPUT_END
	git fast-import --force <input
'

test_expect_success 'G: verify pack' '
	verify_packs
'

test_expect_success 'G: branch changed, but logged' '
	test $old_branch != $(git rev-parse --verify branch^0) &&
	test $old_branch = $(git rev-parse --verify branch@{1})
'

###
### series H
###

test_expect_success 'H: deletall, add 1' '
	test_tick &&
	cat >input <<-INPUT_END &&
	commit refs/heads/H
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	third
	COMMIT

	from refs/heads/branch^0
	M 644 inline i-will-die
	data <<EOF
	this file will never exist.
	EOF

	deleteall
	M 644 inline h/e/l/lo
	data <<EOF
	$file5_data
	EOF

	INPUT_END
	git fast-import <input &&
	git whatchanged H
'

test_expect_success 'H: verify pack' '
	verify_packs
'

test_expect_success 'H: validate old files removed, new files added' '
	cat >expect <<-EOF &&
	:100755 000000 f1fb5da718392694d0076d677d6d0e364c79b0bc 0000000000000000000000000000000000000000 D	file2/newf
	:100644 000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 0000000000000000000000000000000000000000 D	file2/oldf
	:100755 000000 85df50785d62d3b05ab03d9cbf7e4a0b49449730 0000000000000000000000000000000000000000 D	file4
	:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 R100	newdir/interesting	h/e/l/lo
	:100755 000000 e74b7d465e52746be2b4bae983670711e6e66657 0000000000000000000000000000000000000000 D	newdir/exec.sh
	EOF
	git diff-tree -M -r H^ H >actual &&
	compare_diff_raw expect actual
'

test_expect_success 'H: verify file' '
	echo "$file5_data" >expect &&
	git cat-file blob H:h/e/l/lo >actual &&
	test_cmp expect actual
'

###
### series I
###

test_expect_success 'I: export-pack-edges' '
	cat >input <<-INPUT_END &&
	commit refs/heads/export-boundary
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	we have a border.  its only 40 characters wide.
	COMMIT

	from refs/heads/branch

	INPUT_END
	git fast-import --export-pack-edges=edges.list <input
'

test_expect_success 'I: verify edge list' '
	cat >expect <<-EOF &&
	.git/objects/pack/pack-.pack: $(git rev-parse --verify export-boundary)
	EOF
	sed -e s/pack-.*pack/pack-.pack/ edges.list >actual &&
	test_cmp expect actual
'

###
### series J
###

test_expect_success 'J: reset existing branch creates empty commit' '
	cat >input <<-INPUT_END &&
	commit refs/heads/J
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	create J
	COMMIT

	from refs/heads/branch

	reset refs/heads/J

	commit refs/heads/J
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	initialize J
	COMMIT

	INPUT_END
	git fast-import <input
'
test_expect_success 'J: branch has 1 commit, empty tree' '
	test 1 = $(git rev-list J | wc -l) &&
	test 0 = $(git ls-tree J | wc -l)
'

test_expect_success 'J: tag must fail on empty branch' '
	cat >input <<-INPUT_END &&
	reset refs/heads/J2

	tag wrong_tag
	from refs/heads/J2
	data <<EOF
	Tag branch that was reset.
	EOF
	INPUT_END
	test_must_fail git fast-import <input
'

###
### series K
###

test_expect_success 'K: reinit branch with from' '
	cat >input <<-INPUT_END &&
	commit refs/heads/K
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	create K
	COMMIT

	from refs/heads/branch

	commit refs/heads/K
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	redo K
	COMMIT

	from refs/heads/branch^1

	INPUT_END
	git fast-import <input
'
test_expect_success 'K: verify K^1 = branch^1' '
	test $(git rev-parse --verify branch^1) \
		= $(git rev-parse --verify K^1)
'

###
### series L
###

test_expect_success 'L: verify internal tree sorting' '
	cat >input <<-INPUT_END &&
	blob
	mark :1
	data <<EOF
	some data
	EOF

	blob
	mark :2
	data <<EOF
	other data
	EOF

	commit refs/heads/L
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	create L
	COMMIT

	M 644 :1 b.
	M 644 :1 b/other
	M 644 :1 ba

	commit refs/heads/L
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	update L
	COMMIT

	M 644 :2 b.
	M 644 :2 b/other
	M 644 :2 ba
	INPUT_END

	cat >expect <<-EXPECT_END &&
	:100644 100644 4268632... 55d3a52... M	b.
	:040000 040000 0ae5cac... 443c768... M	b
	:100644 100644 4268632... 55d3a52... M	ba
	EXPECT_END

	git fast-import <input &&
	git diff-tree --abbrev --raw L^ L >output &&
	test_cmp expect output
'

test_expect_success 'L: nested tree copy does not corrupt deltas' '
	cat >input <<-INPUT_END &&
	blob
	mark :1
	data <<EOF
	the data
	EOF

	commit refs/heads/L2
	committer C O Mitter <committer@example.com> 1112912473 -0700
	data <<COMMIT
	init L2
	COMMIT
	M 644 :1 a/b/c
	M 644 :1 a/b/d
	M 644 :1 a/e/f

	commit refs/heads/L2
	committer C O Mitter <committer@example.com> 1112912473 -0700
	data <<COMMIT
	update L2
	COMMIT
	C a g
	C a/e g/b
	M 644 :1 g/b/h
	INPUT_END

	cat >expect <<-\EOF &&
	g/b/f
	g/b/h
	EOF

	test_when_finished "git update-ref -d refs/heads/L2" &&
	git fast-import <input &&
	git ls-tree L2 g/b/ >tmp &&
	cat tmp | cut -f 2 >actual &&
	test_cmp expect actual &&
	git fsck $(git rev-parse L2)
'

###
### series M
###

test_expect_success 'M: rename file in same subdirectory' '
	test_tick &&
	cat >input <<-INPUT_END &&
	commit refs/heads/M1
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	file rename
	COMMIT

	from refs/heads/branch^0
	R file2/newf file2/n.e.w.f

	INPUT_END

	cat >expect <<-EOF &&
	:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100	file2/newf	file2/n.e.w.f
	EOF
	git fast-import <input &&
	git diff-tree -M -r M1^ M1 >actual &&
	compare_diff_raw expect actual
'

test_expect_success 'M: rename file to new subdirectory' '
	cat >input <<-INPUT_END &&
	commit refs/heads/M2
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	file rename
	COMMIT

	from refs/heads/branch^0
	R file2/newf i/am/new/to/you

	INPUT_END

	cat >expect <<-EOF &&
	:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100	file2/newf	i/am/new/to/you
	EOF
	git fast-import <input &&
	git diff-tree -M -r M2^ M2 >actual &&
	compare_diff_raw expect actual
'

test_expect_success 'M: rename subdirectory to new subdirectory' '
	cat >input <<-INPUT_END &&
	commit refs/heads/M3
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	file rename
	COMMIT

	from refs/heads/M2^0
	R i other/sub

	INPUT_END

	cat >expect <<-EOF &&
	:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100	i/am/new/to/you	other/sub/am/new/to/you
	EOF
	git fast-import <input &&
	git diff-tree -M -r M3^ M3 >actual &&
	compare_diff_raw expect actual
'

test_expect_success 'M: rename root to subdirectory' '
	cat >input <<-INPUT_END &&
	commit refs/heads/M4
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	rename root
	COMMIT

	from refs/heads/M2^0
	R "" sub

	INPUT_END

	cat >expect <<-EOF &&
	:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 R100	file2/oldf	sub/file2/oldf
	:100755 100755 85df50785d62d3b05ab03d9cbf7e4a0b49449730 85df50785d62d3b05ab03d9cbf7e4a0b49449730 R100	file4	sub/file4
	:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc R100	i/am/new/to/you	sub/i/am/new/to/you
	:100755 100755 e74b7d465e52746be2b4bae983670711e6e66657 e74b7d465e52746be2b4bae983670711e6e66657 R100	newdir/exec.sh	sub/newdir/exec.sh
	:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 R100	newdir/interesting	sub/newdir/interesting
	EOF
	git fast-import <input &&
	git diff-tree -M -r M4^ M4 >actual &&
	cat actual &&
	compare_diff_raw expect actual
'

###
### series N
###

test_expect_success 'N: copy file in same subdirectory' '
	test_tick &&
	cat >input <<-INPUT_END &&
	commit refs/heads/N1
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	file copy
	COMMIT

	from refs/heads/branch^0
	C file2/newf file2/n.e.w.f

	INPUT_END

	cat >expect <<-EOF &&
	:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100	file2/newf	file2/n.e.w.f
	EOF
	git fast-import <input &&
	git diff-tree -C --find-copies-harder -r N1^ N1 >actual &&
	compare_diff_raw expect actual
'

test_expect_success 'N: copy then modify subdirectory' '
	cat >input <<-INPUT_END &&
	commit refs/heads/N2
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	clean directory copy
	COMMIT

	from refs/heads/branch^0
	C file2 file3

	commit refs/heads/N2
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	modify directory copy
	COMMIT

	M 644 inline file3/file5
	data <<EOF
	$file5_data
	EOF

	INPUT_END

	cat >expect <<-EOF &&
	:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 C100	newdir/interesting	file3/file5
	:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100	file2/newf	file3/newf
	:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100	file2/oldf	file3/oldf
	EOF
	git fast-import <input &&
	git diff-tree -C --find-copies-harder -r N2^^ N2 >actual &&
	compare_diff_raw expect actual
'

test_expect_success 'N: copy dirty subdirectory' '
	cat >input <<-INPUT_END &&
	commit refs/heads/N3
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	dirty directory copy
	COMMIT

	from refs/heads/branch^0
	M 644 inline file2/file5
	data <<EOF
	$file5_data
	EOF

	C file2 file3
	D file2/file5

	INPUT_END

	git fast-import <input &&
	test $(git rev-parse N2^{tree}) = $(git rev-parse N3^{tree})
'

test_expect_success 'N: copy directory by id' '
	cat >expect <<-\EOF &&
	:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100	file2/newf	file3/newf
	:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100	file2/oldf	file3/oldf
	EOF
	subdir=$(git rev-parse refs/heads/branch^0:file2) &&
	cat >input <<-INPUT_END &&
	commit refs/heads/N4
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	copy by tree hash
	COMMIT

	from refs/heads/branch^0
	M 040000 $subdir file3
	INPUT_END
	git fast-import <input &&
	git diff-tree -C --find-copies-harder -r N4^ N4 >actual &&
	compare_diff_raw expect actual
'

test_expect_success PIPE 'N: read and copy directory' '
	cat >expect <<-\EOF &&
	:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100	file2/newf	file3/newf
	:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100	file2/oldf	file3/oldf
	EOF
	git update-ref -d refs/heads/N4 &&
	rm -f backflow &&
	mkfifo backflow &&
	(
		exec <backflow &&
		cat <<-EOF &&
		commit refs/heads/N4
		committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
		data <<COMMIT
		copy by tree hash, part 2
		COMMIT

		from refs/heads/branch^0
		ls "file2"
		EOF
		read mode type tree filename &&
		echo "M 040000 $tree file3"
	) |
	git fast-import --cat-blob-fd=3 3>backflow &&
	git diff-tree -C --find-copies-harder -r N4^ N4 >actual &&
	compare_diff_raw expect actual
'

test_expect_success PIPE 'N: empty directory reads as missing' '
	cat <<-\EOF >expect &&
	OBJNAME
	:000000 100644 OBJNAME OBJNAME A	unrelated
	EOF
	echo "missing src" >expect.response &&
	git update-ref -d refs/heads/read-empty &&
	rm -f backflow &&
	mkfifo backflow &&
	(
		exec <backflow &&
		cat <<-EOF &&
		commit refs/heads/read-empty
		committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
		data <<COMMIT
		read "empty" (missing) directory
		COMMIT

		M 100644 inline src/greeting
		data <<BLOB
		hello
		BLOB
		C src/greeting dst1/non-greeting
		C src/greeting unrelated
		# leave behind "empty" src directory
		D src/greeting
		ls "src"
		EOF
		read -r line &&
		printf "%s\n" "$line" >response &&
		cat <<-\EOF
		D dst1
		D dst2
		EOF
	) |
	git fast-import --cat-blob-fd=3 3>backflow &&
	test_cmp expect.response response &&
	git rev-list read-empty |
	git diff-tree -r --root --stdin |
	sed "s/$_x40/OBJNAME/g" >actual &&
	test_cmp expect actual
'

# Replacing the root with a tree object named by hash ("M 040000 $root")
# effectively resets the commit's tree; diff vs N4 shows file3/* deleted.
test_expect_success 'N: copy root directory by tree hash' '
	cat >expect <<-\EOF &&
	:100755 000000 f1fb5da718392694d0076d677d6d0e364c79b0bc 0000000000000000000000000000000000000000 D	file3/newf
	:100644 000000 7123f7f44e39be127c5eb701e5968176ee9d78b1 0000000000000000000000000000000000000000 D	file3/oldf
	EOF
	root=$(git rev-parse refs/heads/branch^0^{tree}) &&
	cat >input <<-INPUT_END &&
	commit refs/heads/N6
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	copy root directory by tree hash
	COMMIT

	from refs/heads/branch^0
	M 040000 $root ""
	INPUT_END
	git fast-import <input &&
	git diff-tree -C --find-copies-harder -r N4 N6 >actual &&
	compare_diff_raw expect actual
'

# The C (copy) command with an empty-string source path copies the whole
# root into the destination directory; every file shows up as a C100 copy.
test_expect_success 'N: copy root by path' '
	cat >expect <<-\EOF &&
	:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100	file2/newf	oldroot/file2/newf
	:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100	file2/oldf	oldroot/file2/oldf
	:100755 100755 85df50785d62d3b05ab03d9cbf7e4a0b49449730 85df50785d62d3b05ab03d9cbf7e4a0b49449730 C100	file4	oldroot/file4
	:100755 100755 e74b7d465e52746be2b4bae983670711e6e66657 e74b7d465e52746be2b4bae983670711e6e66657 C100	newdir/exec.sh	oldroot/newdir/exec.sh
	:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 C100	newdir/interesting	oldroot/newdir/interesting
	EOF
	cat >input <<-INPUT_END &&
	commit refs/heads/N-copy-root-path
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	copy root directory by (empty) path
	COMMIT

	from refs/heads/branch^0
	C "" oldroot
	INPUT_END
	git fast-import <input &&
	git diff-tree -C --find-copies-harder -r branch N-copy-root-path >actual &&
	compare_diff_raw expect actual
'

# Overwriting a populated directory with the empty tree (M 040000
# $empty_tree <path>) must delete its contents; two commits are built and
# their normalized diff-tree output is compared against the expectation.
test_expect_success 'N: delete directory by copying' '
	cat >expect <<-\EOF &&
	OBJID
	:100644 000000 OBJID OBJID D	foo/bar/qux
	OBJID
	:000000 100644 OBJID OBJID A	foo/bar/baz
	:000000 100644 OBJID OBJID A	foo/bar/qux
	EOF
	empty_tree=$(git mktree </dev/null) &&
	cat >input <<-INPUT_END &&
	commit refs/heads/N-delete
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	collect data to be deleted
	COMMIT

	deleteall
	M 100644 inline foo/bar/baz
	data <<DATA_END
	hello
	DATA_END
	C "foo/bar/baz" "foo/bar/qux"
	C "foo/bar/baz" "foo/bar/quux/1"
	C "foo/bar/baz" "foo/bar/quuux"
	M 040000 $empty_tree foo/bar/quux
	M 040000 $empty_tree foo/bar/quuux

	commit refs/heads/N-delete
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	delete subdirectory
	COMMIT

	M 040000 $empty_tree foo/bar/qux
	INPUT_END
	git fast-import <input &&
	git rev-list N-delete |
		git diff-tree -r --stdin --root --always |
		sed -e "s/$_x40/OBJID/g" >actual &&
	test_cmp expect actual
'

# After copying a subtree by hash (file2 -> file3), a second commit adds
# file3/file5 inline; the copied entries must still rename-detect as C100.
test_expect_success 'N: modify copied tree' '
	cat >expect <<-\EOF &&
	:100644 100644 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 fcf778cda181eaa1cbc9e9ce3a2e15ee9f9fe791 C100	newdir/interesting	file3/file5
	:100755 100755 f1fb5da718392694d0076d677d6d0e364c79b0bc f1fb5da718392694d0076d677d6d0e364c79b0bc C100	file2/newf	file3/newf
	:100644 100644 7123f7f44e39be127c5eb701e5968176ee9d78b1 7123f7f44e39be127c5eb701e5968176ee9d78b1 C100	file2/oldf	file3/oldf
	EOF
	subdir=$(git rev-parse refs/heads/branch^0:file2) &&
	cat >input <<-INPUT_END &&
	commit refs/heads/N5
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	copy by tree hash
	COMMIT

	from refs/heads/branch^0
	M 040000 $subdir file3

	commit refs/heads/N5
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	modify directory copy
	COMMIT

	M 644 inline file3/file5
	data <<EOF
	$file5_data
	EOF
	INPUT_END
	git fast-import <input &&
	git diff-tree -C --find-copies-harder -r N5^^ N5 >actual &&
	compare_diff_raw expect actual
'

# A trailing slash on a path is invalid in the fast-import stream; it must
# be rejected in filemodify targets, copy/rename sources, and ls arguments.
test_expect_success 'N: reject foo/ syntax' '
	subdir=$(git rev-parse refs/heads/branch^0:file2) &&
	test_must_fail git fast-import <<-INPUT_END
	commit refs/heads/N5B
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	copy with invalid syntax
	COMMIT

	from refs/heads/branch^0
	M 040000 $subdir file3/
	INPUT_END
'

test_expect_success 'N: reject foo/ syntax in copy source' '
	test_must_fail git fast-import <<-INPUT_END
	commit refs/heads/N5C
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	copy with invalid syntax
	COMMIT

	from refs/heads/branch^0
	C file2/ file3
	INPUT_END
'

test_expect_success 'N: reject foo/ syntax in rename source' '
	test_must_fail git fast-import <<-INPUT_END
	commit refs/heads/N5D
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	rename with invalid syntax
	COMMIT

	from refs/heads/branch^0
	R file2/ file3
	INPUT_END
'

test_expect_success 'N: reject foo/ syntax in ls argument' '
	test_must_fail git fast-import <<-INPUT_END
	commit refs/heads/N5E
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	copy with invalid syntax
	COMMIT

	from refs/heads/branch^0
	ls "file2/"
	INPUT_END
'

# Copy a whole tree to the root by id and then add a file on top of it;
# both the pre-existing file and the new one must be reachable from N8.
test_expect_success 'N: copy to root by id and modify' '
	echo "hello, world" >expect.foo &&
	echo hello >expect.bar &&
	git fast-import <<-SETUP_END &&
	commit refs/heads/N7
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	hello, tree
	COMMIT

	deleteall
	M 644 inline foo/bar
	data <<EOF
	hello
	EOF
	SETUP_END

	tree=$(git rev-parse --verify N7:) &&
	git fast-import <<-INPUT_END &&
	commit refs/heads/N8
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	copy to root by id and modify
	COMMIT

	M 040000 $tree ""
	M 644 inline foo/foo
	data <<EOF
	hello, world
	EOF
	INPUT_END
	git show N8:foo/foo >actual.foo &&
	git show N8:foo/bar >actual.bar &&
	test_cmp expect.foo actual.foo &&
	test_cmp expect.bar actual.bar
'

# C "newdir" "" hoists a subdirectory up to the root; the result must be
# identical to branch:newdir (checked with diff --exit-code).
test_expect_success 'N: extract subtree' '
	branch=$(git rev-parse --verify refs/heads/branch^{tree}) &&
	cat >input <<-INPUT_END &&
	commit refs/heads/N9
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	extract subtree branch:newdir
	COMMIT

	M 040000 $branch ""
	C "newdir" ""
	INPUT_END
	git fast-import <input &&
	git diff --exit-code branch:newdir N9
'

# Exercise a chain of tree edits in one commit: copy a tree to root,
# modify inside it, rename "foo" to the root, then copy within the result.
test_expect_success 'N: modify subtree, extract it, and modify again' '
	echo hello >expect.baz &&
	echo hello, world >expect.qux &&
	git fast-import <<-SETUP_END &&
	commit refs/heads/N10
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	hello, tree
	COMMIT

	deleteall
	M 644 inline foo/bar/baz
	data <<EOF
	hello
	EOF
	SETUP_END

	tree=$(git rev-parse --verify N10:) &&
	git fast-import <<-INPUT_END &&
	commit refs/heads/N11
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	copy to root by id and modify
	COMMIT

	M 040000 $tree ""
	M 100644 inline foo/bar/qux
	data <<EOF
	hello, world
	EOF
	R "foo" ""
	C "bar/qux" "bar/quux"
	INPUT_END
	git show N11:bar/baz >actual.baz &&
	git show N11:bar/qux >actual.qux &&
	git show N11:bar/quux >actual.quux &&
	test_cmp expect.baz actual.baz &&
	test_cmp expect.qux actual.qux &&
	test_cmp expect.qux actual.quux'

###
### series O
###

# A stream sprinkled with "#" comment lines must parse identically to the
# same stream without them: O1 must end up with the same commit as N3.
test_expect_success 'O: comments are all skipped' '
	cat >input <<-INPUT_END &&
	#we will
	commit refs/heads/O1
	# -- ignore all of this text
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	# $GIT_COMMITTER_NAME has inserted here for his benefit.
	data <<COMMIT
	dirty directory copy
	COMMIT

	# do not forget the import blank line!
	#
	# yes, we started from our usual base of branch^0.
	# i like branch^0.
	from refs/heads/branch^0
	# and we need to reuse file2/file5 from N3 above.
	M 644 inline file2/file5
	# otherwise the tree will be different
	data <<EOF
	$file5_data
	EOF

	# do not forget to copy file2 to file3
	C file2 file3
	#
	# or to delete file5 from file2.
	D file2/file5
	# are we done yet?

	INPUT_END

	git fast-import <input &&
	test $(git rev-parse N3) = $(git rev-parse O1)
'

# Omitting the optional blank lines after data commands must still yield
# a commit identical to N3.
test_expect_success 'O: blank lines not necessary after data commands' '
	cat >input <<-INPUT_END &&
	commit refs/heads/O2
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	dirty directory copy
	COMMIT
	from refs/heads/branch^0
	M 644 inline file2/file5
	data <<EOF
	$file5_data
	EOF
	C file2 file3
	D file2/file5

	INPUT_END

	git fast-import <input &&
	test $(git rev-parse N3) = $(git rev-parse O2)
'

# Consolidate existing packs so the pack count assertion in the next
# test starts from a known baseline.
test_expect_success 'O: repack before next test' '
	git repack -a -d
'

# Commands may follow each other with no blank separators; checkpoint in
# mid-stream creates extra packs (hence the pack-file count of 8), and
# marks/resets after checkpoint must still resolve.
test_expect_success 'O: blank lines not necessary after other commands' '
	cat >input <<-INPUT_END &&
	commit refs/heads/O3
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	zstring
	COMMIT
	commit refs/heads/O3
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	zof
	COMMIT
	checkpoint
	commit refs/heads/O3
	mark :5
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	zempty
	COMMIT
	checkpoint
	commit refs/heads/O3
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	zcommits
	COMMIT
	reset refs/tags/O3-2nd
	from :5
	reset refs/tags/O3-3rd
	from :5
	INPUT_END

	cat >expect <<-INPUT_END &&
	string
	of
	empty
	commits
	INPUT_END

	git fast-import <input &&
	test 8 = $(find .git/objects/pack -type f | wc -l) &&
	test $(git rev-parse refs/tags/O3-2nd) = $(git rev-parse O3^) &&
	git log --reverse --pretty=oneline O3 | sed s/^.*z// >actual &&
	test_cmp expect actual
'

# Each "progress" line in the stream must be echoed verbatim on stdout;
# the expectation is derived by grepping the input itself.
test_expect_success 'O: progress outputs as requested by input' '
	cat >input <<-INPUT_END &&
	commit refs/heads/O4
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	zstring
	COMMIT
	commit refs/heads/O4
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	zof
	COMMIT
	progress Two commits down, 2 to go!
	commit refs/heads/O4
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	zempty
	COMMIT
	progress Three commits down, 1 to go!
	commit refs/heads/O4
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	zcommits
	COMMIT
	progress done!
	INPUT_END
	git fast-import <input >actual &&
	grep "progress " <input >expect &&
	test_cmp expect actual
'

###
### series P (gitlinks)
###

# Import a submodule branch plus a superproject that records it via
# mode-160000 gitlink entries, then check the submodule can actually be
# initialized and updated from the imported history.
test_expect_success 'P: superproject & submodule mix' '
	cat >input <<-INPUT_END &&
	blob
	mark :1
	data 10
	test file

	reset refs/heads/sub
	commit refs/heads/sub
	mark :2
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data 12
	sub_initial
	M 100644 :1 file

	blob
	mark :3
	data <<DATAEND
	[submodule "sub"]
		path = sub
		url = "$(pwd)/sub"
	DATAEND

	commit refs/heads/subuse1
	mark :4
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data 8
	initial
	from refs/heads/master
	M 100644 :3 .gitmodules
	M 160000 :2 sub

	blob
	mark :5
	data 20
	test file
	more data

	commit refs/heads/sub
	mark :6
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data 11
	sub_second
	from :2
	M 100644 :5 file

	commit refs/heads/subuse1
	mark :7
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data 7
	second
	from :4
	M 160000 :6 sub

	INPUT_END

	git fast-import <input &&
	git checkout subuse1 &&
	rm -rf sub &&
	mkdir sub &&
	(
		cd sub &&
		git init &&
		git fetch --update-head-ok .. refs/heads/sub:refs/heads/master &&
		git checkout master
	) &&
	git submodule init &&
	git submodule update
'

# Gitlinks may also be written with a literal SHA-1 instead of a mark;
# rebuilding subuse1's history this way must give the identical commit,
# even after the sub branch is deleted and the repo pruned.
test_expect_success 'P: verbatim SHA gitlinks' '
	SUBLAST=$(git rev-parse --verify sub) &&
	SUBPREV=$(git rev-parse --verify sub^) &&

	cat >input <<-INPUT_END &&
	blob
	mark :1
	data <<DATAEND
	[submodule "sub"]
		path = sub
		url = "$(pwd)/sub"
	DATAEND

	commit refs/heads/subuse2
	mark :2
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data 8
	initial
	from refs/heads/master
	M 100644 :1 .gitmodules
	M 160000 $SUBPREV sub

	commit refs/heads/subuse2
	mark :3
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data 7
	second
	from :2
	M 160000 $SUBLAST sub

	INPUT_END

	git branch -D sub &&
	git gc &&
	git prune &&
	git fast-import <input &&
	test $(git rev-parse --verify subuse2) = $(git rev-parse --verify subuse1)
'

# A gitlink must reference a commit; supplying inline data for a
# mode-160000 entry has to be rejected.
test_expect_success 'P: fail on inline gitlink' '
	test_tick &&
	cat >input <<-INPUT_END &&
	commit refs/heads/subuse3
	mark :1
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	corrupt
	COMMIT

	from refs/heads/subuse2
	M 160000 inline sub
	data <<DATA
	$SUBPREV
	DATA

	INPUT_END

	test_must_fail git fast-import <input
'

# Likewise, a mark that points at a blob (not a commit) is not a valid
# gitlink target.
test_expect_success 'P: fail on blob mark in gitlink' '
	test_tick &&
	cat >input <<-INPUT_END &&
	blob
	mark :1
	data <<DATA
	$SUBPREV
	DATA

	commit refs/heads/subuse3
	mark :2
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	corrupt
	COMMIT

	from refs/heads/subuse2
	M 160000 :1 sub

	INPUT_END

	test_must_fail git fast-import <input
'

###
### series Q (notes)
###

# Build three commits on notes-test, then attach notes to them via the
# N command (by mark and inline) across several notes refs.  Later Q
# tests verify each commit, tree, and note blob created here, so the
# note text is kept in shell variables reused below.
test_expect_success 'Q: commit notes' '
	note1_data="The first note for the first commit" &&
	note2_data="The first note for the second commit" &&
	note3_data="The first note for the third commit" &&
	note1b_data="The second note for the first commit" &&
	note1c_data="The third note for the first commit" &&
	note2b_data="The second note for the second commit" &&

	test_tick &&
	cat >input <<-INPUT_END &&
	blob
	mark :2
	data <<EOF
	$file2_data
	EOF

	commit refs/heads/notes-test
	mark :3
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	first (:3)
	COMMIT

	M 644 :2 file2

	blob
	mark :4
	data $file4_len
	$file4_data
	commit refs/heads/notes-test
	mark :5
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	second (:5)
	COMMIT

	M 644 :4 file4

	commit refs/heads/notes-test
	mark :6
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	third (:6)
	COMMIT

	M 644 inline file5
	data <<EOF
	$file5_data
	EOF

	M 755 inline file6
	data <<EOF
	$file6_data
	EOF

	blob
	mark :7
	data <<EOF
	$note1_data
	EOF

	blob
	mark :8
	data <<EOF
	$note2_data
	EOF

	commit refs/notes/foobar
	mark :9
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	notes (:9)
	COMMIT

	N :7 :3
	N :8 :5
	N inline :6
	data <<EOF
	$note3_data
	EOF

	commit refs/notes/foobar
	mark :10
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	notes (:10)
	COMMIT

	N inline :3
	data <<EOF
	$note1b_data
	EOF

	commit refs/notes/foobar2
	mark :11
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	notes (:11)
	COMMIT

	N inline :3
	data <<EOF
	$note1c_data
	EOF

	commit refs/notes/foobar
	mark :12
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	notes (:12)
	COMMIT

	deleteall
	N inline :5
	data <<EOF
	$note2b_data
	EOF

	INPUT_END

	git fast-import <input &&
	git whatchanged notes-test
'

test_expect_success 'Q: verify pack' '
	verify_packs
'

# The next three tests compare the raw commit objects (minus the "tree"
# header line, dropped with "sed 1d") against the expected author/
# committer/message; commit1..3 are captured for reuse by later Q tests.
test_expect_success 'Q: verify first commit' '
	commit1=$(git rev-parse notes-test~2) &&
	commit2=$(git rev-parse notes-test^) &&
	commit3=$(git rev-parse notes-test) &&

	cat >expect <<-EOF &&
	author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE

	first (:3)
	EOF
	git cat-file commit notes-test~2 | sed 1d >actual &&
	test_cmp expect actual
'

test_expect_success 'Q: verify second commit' '
	cat >expect <<-EOF &&
	parent $commit1
	author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE

	second (:5)
	EOF
	git cat-file commit notes-test^ | sed 1d >actual &&
	test_cmp expect actual
'

test_expect_success 'Q: verify third commit' '
	cat >expect <<-EOF &&
	parent $commit2
	author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE

	third (:6)
	EOF
	git cat-file commit notes-test | sed 1d >actual &&
	test_cmp expect actual
'

test_expect_success 'Q: verify first notes commit' '
	cat >expect <<-EOF &&
	author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE

	notes (:9)
	EOF
	git cat-file commit refs/notes/foobar~2 | sed 1d >actual &&
	test_cmp expect actual
'

# The first notes tree holds one note blob per annotated commit; the sed
# strips the blob SHA (the tab-delimited field) so only mode/type/path
# remain for comparison against the sorted expectation.
test_expect_success 'Q: verify first notes tree' '
	cat >expect.unsorted <<-EOF &&
	100644 blob $commit1
	100644 blob $commit2
	100644 blob $commit3
	EOF
	sort expect.unsorted >expect &&
	git cat-file -p refs/notes/foobar~2^{tree} | sed "s/ [0-9a-f]*	/ /" >actual &&
	test_cmp expect actual
'

# A note for commit X lives at path $X inside the notes tree, so it can
# be fetched with "<notes-ref>:<commit-sha>".
test_expect_success 'Q: verify first note for first commit' '
	echo "$note1_data" >expect &&
	git cat-file blob refs/notes/foobar~2:$commit1 >actual &&
	test_cmp expect actual
'

test_expect_success 'Q: verify first note for second commit' '
	echo "$note2_data" >expect &&
	git cat-file blob refs/notes/foobar~2:$commit2 >actual &&
	test_cmp expect actual
'

test_expect_success 'Q: verify first note for third commit' '
	echo "$note3_data" >expect &&
	git cat-file blob refs/notes/foobar~2:$commit3 >actual &&
	test_cmp expect actual
'

test_expect_success 'Q: verify second notes commit' '
	cat >expect <<-EOF &&
	parent $(git rev-parse --verify refs/notes/foobar~2)
	author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE

	notes (:10)
	EOF
	git cat-file commit refs/notes/foobar^ | sed 1d >actual &&
	test_cmp expect actual
'

# After the :10 commit, all three commits must still carry a note in the
# foobar^ tree (only commit1's note content was replaced, not removed).
test_expect_success 'Q: verify second notes tree' '
	cat >expect.unsorted <<-EOF &&
	100644 blob $commit1
	100644 blob $commit2
	100644 blob $commit3
	EOF
	sort expect.unsorted >expect &&
	git cat-file -p refs/notes/foobar^^{tree} | sed "s/ [0-9a-f]*	/ /" >actual &&
	test_cmp expect actual
'

# At foobar^, commit1's note was replaced by note1b while the notes for
# commit2 and commit3 are unchanged from the first notes commit.
test_expect_success 'Q: verify second note for first commit' '
	echo "$note1b_data" >expect &&
	git cat-file blob refs/notes/foobar^:$commit1 >actual &&
	test_cmp expect actual
'

test_expect_success 'Q: verify first note for second commit' '
	echo "$note2_data" >expect &&
	git cat-file blob refs/notes/foobar^:$commit2 >actual &&
	test_cmp expect actual
'

test_expect_success 'Q: verify first note for third commit' '
	echo "$note3_data" >expect &&
	git cat-file blob refs/notes/foobar^:$commit3 >actual &&
	test_cmp expect actual
'

test_expect_success 'Q: verify third notes commit' '
	cat >expect <<-EOF &&
	author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE

	notes (:11)
	EOF
	git cat-file commit refs/notes/foobar2 | sed 1d >actual &&
	test_cmp expect actual
'

# refs/notes/foobar2 got exactly one note (for commit1 via :11), so its
# tree must contain a single entry.
test_expect_success 'Q: verify third notes tree' '
	cat >expect.unsorted <<-EOF &&
	100644 blob $commit1
	EOF
	sort expect.unsorted >expect &&
	git cat-file -p refs/notes/foobar2^{tree} | sed "s/ [0-9a-f]*	/ /" >actual &&
	test_cmp expect actual
'

test_expect_success 'Q: verify third note for first commit' '
	echo "$note1c_data" >expect &&
	git cat-file blob refs/notes/foobar2:$commit1 >actual &&
	test_cmp expect actual
'

# The :12 commit on refs/notes/foobar started with deleteall, so it is a
# child of foobar^ containing only the note added afterwards.
test_expect_success 'Q: verify fourth notes commit' '
	cat >expect <<-EOF &&
	parent $(git rev-parse --verify refs/notes/foobar^)
	author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE

	notes (:12)
	EOF
	git cat-file commit refs/notes/foobar | sed 1d >actual &&
	test_cmp expect actual
'

# deleteall wiped the notes tree, so only the note re-added for commit2
# survives in the final foobar tree.
test_expect_success 'Q: verify fourth notes tree' '
	cat >expect.unsorted <<-EOF &&
	100644 blob $commit2
	EOF
	sort expect.unsorted >expect &&
	git cat-file -p refs/notes/foobar^{tree} | sed "s/ [0-9a-f]*	/ /" >actual &&
	test_cmp expect actual
'

test_expect_success 'Q: verify second note for second commit' '
	echo "$note2b_data" >expect &&
	git cat-file blob refs/notes/foobar:$commit2 >actual &&
	test_cmp expect actual
'

# Annotating a ref that has no commit (Q0 was reset but never committed
# to) must make fast-import fail.
test_expect_success 'Q: deny note on empty branch' '
	cat >input <<-EOF &&
	reset refs/heads/Q0

	commit refs/heads/note-Q0
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	Note for an empty branch.
	COMMIT

	N inline refs/heads/Q0
	data <<NOTE
	some note
	NOTE
	EOF
	test_must_fail git fast-import <input
'
###
### series R (feature and option)
###

# Unknown stream features must abort the import.
test_expect_success 'R: abort on unsupported feature' '
	cat >input <<-EOF &&
	feature no-such-feature-exists
	EOF

	test_must_fail git fast-import <input
'

test_expect_success 'R: supported feature is accepted' '
	cat >input <<-EOF &&
	feature date-format=now
	EOF

	git fast-import <input
'

# feature lines are only valid before any data-producing command.
test_expect_success 'R: abort on receiving feature after data command' '
	cat >input <<-EOF &&
	blob
	data 3
	hi
	feature date-format=now
	EOF

	test_must_fail git fast-import <input
'

test_expect_success 'R: only one import-marks feature allowed per stream' '
	cat >input <<-EOF &&
	feature import-marks=git.marks
	feature import-marks=git2.marks
	EOF

	test_must_fail git fast-import <input
'

# The export-marks feature in the stream must produce a marks file
# containing an entry for mark :1.  (The "input" file is reused by the
# next test.)
test_expect_success 'R: export-marks feature results in a marks file being created' '
	cat >input <<-EOF &&
	feature export-marks=git.marks
	blob
	mark :1
	data 3
	hi

	EOF

	git fast-import <input &&
	grep :1 git.marks
'

# Reuses "input" from the previous test: the --export-marks command-line
# option must win over the stream's export-marks=git.marks feature.
test_expect_success 'R: export-marks options can be overridden by commandline options' '
	git fast-import --export-marks=other.marks <input &&
	grep :1 other.marks
'

# Importing marks from a missing file must fail, whether requested on
# the command line or via a stream feature.
test_expect_success 'R: catch typo in marks file name' '
	test_must_fail git fast-import --import-marks=nonexistent.marks </dev/null &&
	echo "feature import-marks=nonexistent.marks" |
	test_must_fail git fast-import
'

# Round-trip: export marks, then import and re-export to the same file;
# the file ends up with both entries.
test_expect_success 'R: import and output marks can be the same file' '
	rm -f io.marks &&
	blob=$(echo hi | git hash-object --stdin) &&
	cat >expect <<-EOF &&
	:1 $blob
	:2 $blob
	EOF
	git fast-import --export-marks=io.marks <<-\EOF &&
	blob
	mark :1
	data 3
	hi

	EOF
	git fast-import --import-marks=io.marks --export-marks=io.marks <<-\EOF &&
	blob
	mark :2
	data 3
	hi

	EOF
	test_cmp expect io.marks
'

# --import-marks on a file that does not exist yet is an error even if
# --export-marks would create it.
test_expect_success 'R: --import-marks=foo --output-marks=foo to create foo fails' '
	rm -f io.marks &&
	test_must_fail git fast-import --import-marks=io.marks --export-marks=io.marks <<-\EOF
	blob
	mark :1
	data 3
	hi

	EOF
'

# --import-marks-if-exists tolerates a missing marks file.
test_expect_success 'R: --import-marks-if-exists' '
	rm -f io.marks &&
	blob=$(echo hi | git hash-object --stdin) &&
	echo ":1 $blob" >expect &&
	git fast-import --import-marks-if-exists=io.marks --export-marks=io.marks <<-\EOF &&
	blob
	mark :1
	data 3
	hi

	EOF
	test_cmp expect io.marks
'

# Same as above but via the stream feature, including its interaction
# with command-line --import-marks / --import-marks-if-exists (the
# command line takes precedence over the stream feature).
test_expect_success 'R: feature import-marks-if-exists' '
	rm -f io.marks &&
	>expect &&

	git fast-import --export-marks=io.marks <<-\EOF &&
	feature import-marks-if-exists=not_io.marks
	EOF
	test_cmp expect io.marks &&

	blob=$(echo hi | git hash-object --stdin) &&

	echo ":1 $blob" >io.marks &&
	echo ":1 $blob" >expect &&
	echo ":2 $blob" >>expect &&

	git fast-import --export-marks=io.marks <<-\EOF &&
	feature import-marks-if-exists=io.marks
	blob
	mark :2
	data 3
	hi

	EOF
	test_cmp expect io.marks &&

	echo ":3 $blob" >>expect &&

	git fast-import --import-marks=io.marks \
			--export-marks=io.marks <<-\EOF &&
	feature import-marks-if-exists=not_io.marks
	blob
	mark :3
	data 3
	hi

	EOF
	test_cmp expect io.marks &&

	>expect &&

	git fast-import --import-marks-if-exists=not_io.marks \
			--export-marks=io.marks <<-\EOF &&
	feature import-marks-if-exists=io.marks
	EOF
	test_cmp expect io.marks
'

# A stream with only marks features (no objects) must copy the imported
# marks through to the exported file unchanged.
test_expect_success 'R: import to output marks works without any content' '
	cat >input <<-EOF &&
	feature import-marks=marks.out
	feature export-marks=marks.new
	EOF

	git fast-import <input &&
	test_cmp marks.out marks.new
'

# When both a command-line --import-marks and a stream feature are given,
# the command-line file wins (the stream names a nonexistent file).
test_expect_success 'R: import marks prefers commandline marks file over the stream' '
	cat >input <<-EOF &&
	feature import-marks=nonexistent.marks
	feature export-marks=marks.new
	EOF

	git fast-import --import-marks=marks.out <input &&
	test_cmp marks.out marks.new
'


# Splitting marks.out into two files and importing both must reproduce
# the combined marks file exactly.
test_expect_success 'R: multiple --import-marks= should be honoured' '
	cat >input <<-EOF &&
	feature import-marks=nonexistent.marks
	feature export-marks=combined.marks
	EOF

	head -n2 marks.out >one.marks &&
	tail -n +3 marks.out >two.marks &&
	git fast-import --import-marks=one.marks --import-marks=two.marks <input &&
	test_cmp marks.out combined.marks
'

# With relative-marks, marks paths are resolved under
# .git/info/fast-import/ rather than the current directory.
test_expect_success 'R: feature relative-marks should be honoured' '
	cat >input <<-EOF &&
	feature relative-marks
	feature import-marks=relative.in
	feature export-marks=relative.out
	EOF

	mkdir -p .git/info/fast-import/ &&
	cp marks.new .git/info/fast-import/relative.in &&
	git fast-import <input &&
	test_cmp marks.new .git/info/fast-import/relative.out
'

# no-relative-marks switches back to cwd-relative paths mid-stream.
test_expect_success 'R: feature no-relative-marks should be honoured' '
	cat >input <<-EOF &&
	feature relative-marks
	feature import-marks=relative.in
	feature no-relative-marks
	feature export-marks=non-relative.out
	EOF

	git fast-import <input &&
	test_cmp marks.new non-relative.out
'

test_expect_success 'R: feature ls supported' '
	echo "feature ls" |
	git fast-import
'

test_expect_success 'R: feature cat-blob supported' '
	echo "feature cat-blob" |
	git fast-import
'

test_expect_success 'R: cat-blob-fd must be a nonnegative integer' '
	test_must_fail git fast-import --cat-blob-fd=-1 </dev/null
'

test_expect_success !MINGW 'R: print old blob' '
	blob=$(echo "yes it can" | git hash-object -w --stdin) &&
	cat >expect <<-EOF &&
	${blob} blob 11
	yes it can

	EOF
	echo "cat-blob $blob" |
	git fast-import --cat-blob-fd=6 6>actual &&
	test_cmp expect actual
'

test_expect_success !MINGW 'R: in-stream cat-blob-fd not respected' '
	echo hello >greeting &&
	blob=$(git hash-object -w greeting) &&
	cat >expect <<-EOF &&
	${blob} blob 6
	hello

	EOF
	git fast-import --cat-blob-fd=3 3>actual.3 >actual.1 <<-EOF &&
	cat-blob $blob
	EOF
	test_cmp expect actual.3 &&
	test_must_be_empty actual.1 &&
	git fast-import 3>actual.3 >actual.1 <<-EOF &&
	option cat-blob-fd=3
	cat-blob $blob
	EOF
	test_must_be_empty actual.3 &&
	test_cmp expect actual.1
'

test_expect_success !MINGW 'R: print mark for new blob' '
	echo "effluentish" | git hash-object --stdin >expect &&
	git fast-import --cat-blob-fd=6 6>actual <<-\EOF &&
	blob
	mark :1
	data <<BLOB_END
	effluentish
	BLOB_END
	get-mark :1
	EOF
	test_cmp expect actual
'

test_expect_success !MINGW 'R: print new blob' '
	blob=$(echo "yep yep yep" | git hash-object --stdin) &&
	cat >expect <<-EOF &&
	${blob} blob 12
	yep yep yep

	EOF
	git fast-import --cat-blob-fd=6 6>actual <<-\EOF &&
	blob
	mark :1
	data <<BLOB_END
	yep yep yep
	BLOB_END
	cat-blob :1
	EOF
	test_cmp expect actual
'

test_expect_success !MINGW 'R: print new blob by sha1' '
	blob=$(echo "a new blob named by sha1" | git hash-object --stdin) &&
	cat >expect <<-EOF &&
	${blob} blob 25
	a new blob named by sha1

	EOF
	git fast-import --cat-blob-fd=6 6>actual <<-EOF &&
	blob
	data <<BLOB_END
	a new blob named by sha1
	BLOB_END
	cat-blob $blob
	EOF
	test_cmp expect actual
'

# Grow "big" by 16x per iteration (3 iterations) for the streaming tests
# below; the subshell keeps bigger/big scratch work contained.
test_expect_success 'setup: big file' '
	(
		echo "the quick brown fox jumps over the lazy dog" >big &&
		for i in 1 2 3
		do
			cat big big big big >bigger &&
			cat bigger bigger bigger bigger >big ||
			exit
		done
	)
'

# Without --cat-blob-fd, cat-blob responses go to stdout; stream two
# blobs (one large) and check both responses arrive in order.
test_expect_success 'R: print two blobs to stdout' '
	blob1=$(git hash-object big) &&
	blob1_len=$(wc -c <big) &&
	blob2=$(echo hello | git hash-object --stdin) &&
	{
		echo ${blob1} blob $blob1_len &&
		cat big &&
		cat <<-EOF

		${blob2} blob 6
		hello

		EOF
	} >expect &&
	{
		cat <<-\END_PART1 &&
			blob
			mark :1
			data <<data_end
		END_PART1
		cat big &&
		cat <<-\EOF
			data_end
			blob
			mark :2
			data <<data_end
			hello
			data_end
			cat-blob :1
			cat-blob :2
		EOF
	} |
	git fast-import >actual &&
	test_cmp expect actual
'

# Simulate a frontend that round-trips a blob: feed the big blob in,
# read the cat-blob response back over the "blobs" fifo (fd 3), then
# commit the returned bytes as file3.  Requires the PIPE prereq (mkfifo).
test_expect_success PIPE 'R: copy using cat-file' '
	expect_id=$(git hash-object big) &&
	expect_len=$(wc -c <big) &&
	echo $expect_id blob $expect_len >expect.response &&

	rm -f blobs &&
	cat >frontend <<-\FRONTEND_END &&
	#!/bin/sh
	FRONTEND_END

	mkfifo blobs &&
	(
		export GIT_COMMITTER_NAME GIT_COMMITTER_EMAIL GIT_COMMITTER_DATE &&
		cat <<-\EOF &&
		feature cat-blob
		blob
		mark :1
		data <<BLOB
		EOF
		cat big &&
		cat <<-\EOF &&
		BLOB
		cat-blob :1
		EOF

		read blob_id type size <&3 &&
		echo "$blob_id $type $size" >response &&
		test_copy_bytes $size >blob <&3 &&
		read newline <&3 &&

		cat <<-EOF &&
		commit refs/heads/copied
		committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
		data <<COMMIT
		copy big file as file3
		COMMIT
		M 644 inline file3
		data <<BLOB
		EOF
		cat blob &&
		echo BLOB
	) 3<blobs |
	git fast-import --cat-blob-fd=3 3>blobs &&
	git show copied:file3 >actual &&
	test_cmp expect.response response &&
	test_cmp big actual
'

# cat-blob must be answered even while a commit is open: request a blob
# created before the commit, mid-stream, and read the reply over fd 3.
test_expect_success PIPE 'R: print blob mid-commit' '
	rm -f blobs &&
	echo "A blob from _before_ the commit." >expect &&
	mkfifo blobs &&
	(
		exec 3<blobs &&
		cat <<-EOF &&
		feature cat-blob
		blob
		mark :1
		data <<BLOB
		A blob from _before_ the commit.
		BLOB
		commit refs/heads/temporary
		committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
		data <<COMMIT
		Empty commit
		COMMIT
		cat-blob :1
		EOF

		read blob_id type size <&3 &&
		test_copy_bytes $size >actual <&3 &&
		read newline <&3 &&

		echo
	) |
	git fast-import --cat-blob-fd=3 3>blobs &&
	test_cmp expect actual
'

# A blob staged by the commit currently being built (via M inline) must
# also be retrievable by its SHA-1 before the commit is closed.
test_expect_success PIPE 'R: print staged blob within commit' '
	rm -f blobs &&
	echo "A blob from _within_ the commit." >expect &&
	mkfifo blobs &&
	(
		exec 3<blobs &&
		cat <<-EOF &&
		feature cat-blob
		commit refs/heads/within
		committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
		data <<COMMIT
		Empty commit
		COMMIT
		M 644 inline within
		data <<BLOB
		A blob from _within_ the commit.
		BLOB
		EOF

		to_get=$(
			echo "A blob from _within_ the commit." |
			git hash-object --stdin
		) &&
		echo "cat-blob $to_get" &&

		read blob_id type size <&3 &&
		test_copy_bytes $size >actual <&3 &&
		read newline <&3 &&

		echo deleteall
	) |
	git fast-import --cat-blob-fd=3 3>blobs &&
	test_cmp expect actual
'

# "option git quiet" must suppress the statistics normally printed to
# stderr at the end of an import.
test_expect_success 'R: quiet option results in no stats being output' '
	cat >input <<-EOF &&
	option git quiet
	blob
	data 3
	hi

	EOF

	git fast-import <input 2>output &&
	test_must_be_empty output
'

# With "feature done" (or --done), a stream that ends without an explicit
# "done" command must be treated as truncated and fail.
test_expect_success 'R: feature done means terminating "done" is mandatory' '
	echo feature done | test_must_fail git fast-import &&
	test_must_fail git fast-import --done </dev/null
'

# Anything after "done" is ignored, with or without the feature line.
test_expect_success 'R: terminating "done" with trailing gibberish is ok' '
	git fast-import <<-\EOF &&
	feature done
	done
	trailing gibberish
	EOF
	git fast-import <<-\EOF
	done
	more trailing gibberish
	EOF
'

# "done" may appear while a commit is still open; the commit is completed
# first.
test_expect_success 'R: terminating "done" within commit' '
	cat >expect <<-\EOF &&
	OBJID
	:000000 100644 OBJID OBJID A	hello.c
	:000000 100644 OBJID OBJID A	hello2.c
	EOF
	git fast-import <<-EOF &&
	commit refs/heads/done-ends
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<EOT
	Commit terminated by "done" command
	EOT
	M 100644 inline hello.c
	data <<EOT
	Hello, world.
	EOT
	C hello.c hello2.c
	done
	EOF
	git rev-list done-ends |
	git diff-tree -r --stdin --root --always |
	sed -e "s/$_x40/OBJID/g" >actual &&
	test_cmp expect actual
'

# Unknown or malformed "option git" values must abort; options for other
# VCS namespaces are silently ignored.
test_expect_success 'R: die on unknown option' '
	cat >input <<-EOF &&
	option git non-existing-option
	EOF

	test_must_fail git fast-import <input
'

test_expect_success 'R: unknown commandline options are rejected' '\
	test_must_fail git fast-import --non-existing-option < /dev/null
'

test_expect_success 'R: die on invalid option argument' '
	echo "option git active-branches=-5" |
	test_must_fail git fast-import &&
	echo "option git depth=" |
	test_must_fail git fast-import &&
	test_must_fail git fast-import --depth="5 elephants" </dev/null
'

test_expect_success 'R: ignore non-git options' '
	cat >input <<-EOF &&
	option non-existing-vcs non-existing-option
	EOF

	git fast-import <input
'

# A failed import must leave the pre-existing marks file untouched (the
# seeded io.marks, which deliberately starts with a corrupt :3 entry).
test_expect_success 'R: corrupt lines do not mess marks file' '
	rm -f io.marks &&
	blob=$(echo hi | git hash-object --stdin) &&
	cat >expect <<-EOF &&
	:3 0000000000000000000000000000000000000000
	:1 $blob
	:2 $blob
	EOF
	cp expect io.marks &&
	test_must_fail git fast-import --import-marks=io.marks --export-marks=io.marks <<-\EOF &&

	EOF
	test_cmp expect io.marks
'

##
## R: very large blobs
##
test_expect_success 'R: blob bigger than threshold' '
	blobsize=$((2*1024*1024 + 53)) &&
	test-genrandom bar $blobsize >expect &&
	cat >input <<-INPUT_END &&
	commit refs/heads/big-file
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	R - big file
	COMMIT

	M 644 inline big1
	data $blobsize
	INPUT_END
	cat expect >>input &&
	cat >>input <<-INPUT_END &&
	M 644 inline big2
	data $blobsize
	INPUT_END
	cat expect >>input &&
	echo >>input &&

	test_create_repo R &&
	git --git-dir=R/.git config fastimport.unpackLimit 0 &&
	git --git-dir=R/.git fast-import --big-file-threshold=1 <input
'

test_expect_success 'R: verify created pack' '
	(
		cd R &&
		verify_packs -v > ../verify
	)
'

test_expect_success 'R: verify written objects' '
	git --git-dir=R/.git cat-file blob big-file:big1 >actual &&
	test_cmp_bin expect actual &&
	a=$(git --git-dir=R/.git rev-parse big-file:big1) &&
	b=$(git --git-dir=R/.git rev-parse big-file:big2) &&
	test $a = $b
'

# NOTE(review): order-dependent — relies on $a being set by the
# "R: verify written objects" test above and on the "verify" file
# written by "R: verify created pack"; it cannot run standalone.
test_expect_success 'R: blob appears only once' '
	n=$(grep $a verify | wc -l) &&
	test 1 = $n
'

###
### series S
###
#
# Make sure missing spaces and EOLs after mark references
# cause errors.
#
# Setup:
#
#   1--2--4
#    \   /
#     -3-
#
#   commit marks:  301, 302, 303, 304
#   blob marks:              403, 404, resp.
#   note mark:          202
#
# The error message when a space is missing not at the
# end of the line is:
#
#   Missing space after ..
#
# or when extra characters come after the mark at the end
# of the line:
#
#   Garbage after ..
#
# or when the dataref is neither "inline " nor a known SHA1,
#
#   Invalid dataref ..
#
test_expect_success 'S: initialize for S tests' '
	test_tick &&

	cat >input <<-INPUT_END &&
	commit refs/heads/S
	mark :301
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	commit 1
	COMMIT
	M 100644 inline hello.c
	data <<BLOB
	blob 1
	BLOB

	commit refs/heads/S
	mark :302
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	commit 2
	COMMIT
	from :301
	M 100644 inline hello.c
	data <<BLOB
	blob 2
	BLOB

	blob
	mark :403
	data <<BLOB
	blob 3
	BLOB

	blob
	mark :202
	data <<BLOB
	note 2
	BLOB
	INPUT_END

	git fast-import --export-marks=marks <input
'

#
# filemodify, three datarefs
#
test_expect_success 'S: filemodify with garbage after mark must fail' '
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	commit refs/heads/S
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	commit N
	COMMIT
	M 100644 :403x hello.c
	EOF
	cat err &&
	test_i18ngrep "space after mark" err
'

# inline is misspelled; fast-import thinks it is some unknown dataref
test_expect_success 'S: filemodify with garbage after inline must fail' '
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	commit refs/heads/S
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	commit N
	COMMIT
	M 100644 inlineX hello.c
	data <<BLOB
	inline
	BLOB
	EOF
	cat err &&
	test_i18ngrep "nvalid dataref" err
'

test_expect_success 'S: filemodify with garbage after sha1 must fail' '
	sha1=$(grep :403 marks | cut -d\  -f2) &&
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	commit refs/heads/S
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	commit N
	COMMIT
	M 100644 ${sha1}x hello.c
	EOF
	cat err &&
	test_i18ngrep "space after SHA1" err
'

#
# notemodify, three ways to say dataref
#
# The mark dataref of an N (notemodify) command must be terminated by a
# space; trailing garbage such as ":202x" has to be rejected.
# (Fixed "garabge" -> "garbage" in the title to match the sibling tests.)
test_expect_success 'S: notemodify with garbage after mark dataref must fail' '
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	commit refs/heads/S
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	commit S note dataref markref
	COMMIT
	N :202x :302
	EOF
	cat err &&
	test_i18ngrep "space after mark" err
'

test_expect_success 'S: notemodify with garbage after inline dataref must fail' '
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	commit refs/heads/S
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	commit S note dataref inline
	COMMIT
	N inlineX :302
	data <<BLOB
	note blob
	BLOB
	EOF
	cat err &&
	test_i18ngrep "nvalid dataref" err
'

test_expect_success 'S: notemodify with garbage after sha1 dataref must fail' '
	sha1=$(grep :202 marks | cut -d\  -f2) &&
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	commit refs/heads/S
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	commit S note dataref sha1
	COMMIT
	N ${sha1}x :302
	EOF
	cat err &&
	test_i18ngrep "space after SHA1" err
'

#
# notemodify, mark in commit-ish
#
test_expect_success 'S: notemodify with garbage after mark commit-ish must fail' '
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	commit refs/heads/Snotes
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	commit S note commit-ish
	COMMIT
	N :202 :302x
	EOF
	cat err &&
	test_i18ngrep "after mark" err
'

#
# from
#
test_expect_success 'S: from with garbage after mark must fail' '
	test_must_fail \
	git fast-import --import-marks=marks --export-marks=marks <<-EOF 2>err &&
	commit refs/heads/S2
	mark :303
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	commit 3
	COMMIT
	from :301x
	M 100644 :403 hello.c
	EOF


	# go create the commit, need it for merge test
	git fast-import --import-marks=marks --export-marks=marks <<-EOF &&
	commit refs/heads/S2
	mark :303
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	commit 3
	COMMIT
	from :301
	M 100644 :403 hello.c
	EOF

	# now evaluate the error
	cat err &&
	test_i18ngrep "after mark" err
'


#
# merge
#
test_expect_success 'S: merge with garbage after mark must fail' '
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	commit refs/heads/S
	mark :304
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	merge 4
	COMMIT
	from :302
	merge :303x
	M 100644 :403 hello.c
	EOF
	cat err &&
	test_i18ngrep "after mark" err
'

#
# tag, from markref
#
test_expect_success 'S: tag with garbage after mark must fail' '
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	tag refs/tags/Stag
	from :302x
	tagger $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<TAG
	tag S
	TAG
	EOF
	cat err &&
	test_i18ngrep "after mark" err
'

#
# cat-blob markref
#
test_expect_success 'S: cat-blob with garbage after mark must fail' '
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	cat-blob :403x
	EOF
	cat err &&
	test_i18ngrep "after mark" err
'

#
# ls markref
#
test_expect_success 'S: ls with garbage after mark must fail' '
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	ls :302x hello.c
	EOF
	cat err &&
	test_i18ngrep "space after mark" err
'

test_expect_success 'S: ls with garbage after sha1 must fail' '
	sha1=$(grep :302 marks | cut -d\  -f2) &&
	test_must_fail git fast-import --import-marks=marks <<-EOF 2>err &&
	ls ${sha1}x hello.c
	EOF
	cat err &&
	test_i18ngrep "space after tree-ish" err
'

###
### series T (ls)
###
# Setup is carried over from series S.

test_expect_success 'T: ls root tree' '
	sed -e "s/Z\$//" >expect <<-EOF &&
	040000 tree $(git rev-parse S^{tree})	Z
	EOF
	sha1=$(git rev-parse --verify S) &&
	git fast-import --import-marks=marks <<-EOF >actual &&
	ls $sha1 ""
	EOF
	test_cmp expect actual
'

test_expect_success 'T: delete branch' '
	git branch to-delete &&
	git fast-import <<-EOF &&
	reset refs/heads/to-delete
	from 0000000000000000000000000000000000000000
	EOF
	test_must_fail git rev-parse --verify refs/heads/to-delete
'

test_expect_success 'T: empty reset doesnt delete branch' '
	git branch not-to-delete &&
	git fast-import <<-EOF &&
	reset refs/heads/not-to-delete
	EOF
	git show-ref &&
	git rev-parse --verify refs/heads/not-to-delete
'

###
### series U (filedelete)
###

test_expect_success 'U: initialize for U tests' '
	cat >input <<-INPUT_END &&
	commit refs/heads/U
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	test setup
	COMMIT
	M 100644 inline hello.c
	data <<BLOB
	blob 1
	BLOB
	M 100644 inline good/night.txt
	data <<BLOB
	sleep well
	BLOB
	M 100644 inline good/bye.txt
	data <<BLOB
	au revoir
	BLOB

	INPUT_END

	git fast-import <input
'

test_expect_success 'U: filedelete file succeeds' '
	cat >input <<-INPUT_END &&
	commit refs/heads/U
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	delete good/night.txt
	COMMIT
	from refs/heads/U^0
	D good/night.txt

	INPUT_END

	git fast-import <input
'

test_expect_success 'U: validate file delete result' '
	cat >expect <<-EOF &&
	:100644 000000 2907ebb4bf85d91bf0716bb3bd8a68ef48d6da76 0000000000000000000000000000000000000000 D	good/night.txt
	EOF

	git diff-tree -M -r U^1 U >actual &&

	compare_diff_raw expect actual
'

test_expect_success 'U: filedelete directory succeeds' '
	cat >input <<-INPUT_END &&
	commit refs/heads/U
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	delete good dir
	COMMIT
	from refs/heads/U^0
	D good

	INPUT_END

	git fast-import <input
'

test_expect_success 'U: validate directory delete result' '
	cat >expect <<-EOF &&
	:100644 000000 69cb75792f55123d8389c156b0b41c2ff00ed507 0000000000000000000000000000000000000000 D	good/bye.txt
	EOF

	git diff-tree -M -r U^1 U >actual &&

	compare_diff_raw expect actual
'

test_expect_success 'U: filedelete root succeeds' '
	cat >input <<-INPUT_END &&
	commit refs/heads/U
	committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
	data <<COMMIT
	must succeed
	COMMIT
	from refs/heads/U^0
	D ""

	INPUT_END

	git fast-import <input
'

test_expect_success 'U: validate root delete result' '
	cat >expect <<-EOF &&
	:100644 000000 c18147dc648481eeb65dc5e66628429a64843327 0000000000000000000000000000000000000000 D	hello.c
	EOF

	git diff-tree -M -r U^1 U >actual &&

	compare_diff_raw expect actual
'

test_done
    (DONE benchmarks/testdata/t9300-fast-import.sh)
#!/bin/sh
#
# Copyright (c) 2006 Junio C Hamano
#

setvar test_description = ''various format-patch tests''

source ./test-lib.sh
source "$TEST_DIRECTORY"/lib-terminal.sh

test_expect_success setup '

	for i in 1 2 3 4 5 6 7 8 9 10; do echo "$i"; done >file &&
	cat file >elif &&
	git add file elif &&
	test_tick &&
	git commit -m Initial &&
	git checkout -b side &&

	for i in 1 2 5 6 A B C 7 8 9 10; do echo "$i"; done >file &&
	test_chmod +x elif &&
	test_tick &&
	git commit -m "Side changes #1" &&

	for i in D E F; do echo "$i"; done >>file &&
	git update-index file &&
	test_tick &&
	git commit -m "Side changes #2" &&
	git tag C2 &&

	for i in 5 6 1 2 3 A 4 B C 7 8 9 10 D E F; do echo "$i"; done >file &&
	git update-index file &&
	test_tick &&
	git commit -m "Side changes #3 with \\n backslash-n in it." &&

	git checkout master &&
	git diff-tree -p C2 | git apply --index &&
	test_tick &&
	git commit -m "Master accepts moral equivalent of #2"

'

test_expect_success "format-patch --ignore-if-in-upstream" '

	git format-patch --stdout master..side >patch0 &&
	cnt=$(grep "^From " patch0 | wc -l) &&
	test $cnt = 3

'

test_expect_success "format-patch --ignore-if-in-upstream" '

	git format-patch --stdout \
		--ignore-if-in-upstream master..side >patch1 &&
	cnt=$(grep "^From " patch1 | wc -l) &&
	test $cnt = 2

'

test_expect_success "format-patch --ignore-if-in-upstream handles tags" '
	git tag -a v1 -m tag side &&
	git tag -a v2 -m tag master &&
	git format-patch --stdout --ignore-if-in-upstream v2..v1 >patch1 &&
	cnt=$(grep "^From " patch1 | wc -l) &&
	test $cnt = 2
'

test_expect_success "format-patch doesn't consider merge commits" '

	git checkout -b slave master &&
	echo "Another line" >>file &&
	test_tick &&
	git commit -am "Slave change #1" &&
	echo "Yet another line" >>file &&
	test_tick &&
	git commit -am "Slave change #2" &&
	git checkout -b merger master &&
	test_tick &&
	git merge --no-ff slave &&
	cnt=$(git format-patch -3 --stdout | grep "^From " | wc -l) &&
	test $cnt = 3
'

test_expect_success "format-patch result applies" '

	git checkout -b rebuild-0 master &&
	git am -3 patch0 &&
	cnt=$(git rev-list master.. | wc -l) &&
	test $cnt = 2
'

test_expect_success "format-patch --ignore-if-in-upstream result applies" '

	git checkout -b rebuild-1 master &&
	git am -3 patch1 &&
	cnt=$(git rev-list master.. | wc -l) &&
	test $cnt = 2
'

test_expect_success 'commit did not screw up the log message' '

	git cat-file commit side | grep "^Side .* with .* backslash-n"

'

test_expect_success 'format-patch did not screw up the log message' '

	grep "^Subject: .*Side changes #3 with .* backslash-n" patch0 &&
	grep "^Subject: .*Side changes #3 with .* backslash-n" patch1

'

test_expect_success 'replay did not screw up the log message' '

	git cat-file commit rebuild-1 | grep "^Side .* with .* backslash-n"

'

test_expect_success 'extra headers' '

	git config format.headers "To: R E Cipient <rcipient@example.com>
" &&
	git config --add format.headers "Cc: S E Cipient <scipient@example.com>
" &&
	git format-patch --stdout master..side > patch2 &&
	sed -e "/^\$/q" patch2 > hdrs2 &&
	grep "^To: R E Cipient <rcipient@example.com>\$" hdrs2 &&
	grep "^Cc: S E Cipient <scipient@example.com>\$" hdrs2

'

test_expect_success 'extra headers without newlines' '

	git config --replace-all format.headers "To: R E Cipient <rcipient@example.com>" &&
	git config --add format.headers "Cc: S E Cipient <scipient@example.com>" &&
	git format-patch --stdout master..side >patch3 &&
	sed -e "/^\$/q" patch3 > hdrs3 &&
	grep "^To: R E Cipient <rcipient@example.com>\$" hdrs3 &&
	grep "^Cc: S E Cipient <scipient@example.com>\$" hdrs3

'

test_expect_success 'extra headers with multiple To:s' '

	git config --replace-all format.headers "To: R E Cipient <rcipient@example.com>" &&
	git config --add format.headers "To: S E Cipient <scipient@example.com>" &&
	git format-patch --stdout master..side > patch4 &&
	sed -e "/^\$/q" patch4 > hdrs4 &&
	grep "^To: R E Cipient <rcipient@example.com>,\$" hdrs4 &&
	grep "^ *S E Cipient <scipient@example.com>\$" hdrs4
'

test_expect_success 'additional command line cc (ascii)' '

	git config --replace-all format.headers "Cc: R E Cipient <rcipient@example.com>" &&
	git format-patch --cc="S E Cipient <scipient@example.com>" --stdout master..side | sed -e "/^\$/q" >patch5 &&
	grep "^Cc: R E Cipient <rcipient@example.com>,\$" patch5 &&
	grep "^ *S E Cipient <scipient@example.com>\$" patch5
'

test_expect_failure 'additional command line cc (rfc822)' '

	git config --replace-all format.headers "Cc: R E Cipient <rcipient@example.com>" &&
	git format-patch --cc="S. E. Cipient <scipient@example.com>" --stdout master..side | sed -e "/^\$/q" >patch5 &&
	grep "^Cc: R E Cipient <rcipient@example.com>,\$" patch5 &&
	grep "^ *\"S. E. Cipient\" <scipient@example.com>\$" patch5
'

test_expect_success 'command line headers' '

	git config --unset-all format.headers &&
	git format-patch --add-header="Cc: R E Cipient <rcipient@example.com>" --stdout master..side | sed -e "/^\$/q" >patch6 &&
	grep "^Cc: R E Cipient <rcipient@example.com>\$" patch6
'

test_expect_success 'configuration headers and command line headers' '

	git config --replace-all format.headers "Cc: R E Cipient <rcipient@example.com>" &&
	git format-patch --add-header="Cc: S E Cipient <scipient@example.com>" --stdout master..side | sed -e "/^\$/q" >patch7 &&
	grep "^Cc: R E Cipient <rcipient@example.com>,\$" patch7 &&
	grep "^ *S E Cipient <scipient@example.com>\$" patch7
'

test_expect_success 'command line To: header (ascii)' '

	git config --unset-all format.headers &&
	git format-patch --to="R E Cipient <rcipient@example.com>" --stdout master..side | sed -e "/^\$/q" >patch8 &&
	grep "^To: R E Cipient <rcipient@example.com>\$" patch8
'

test_expect_failure 'command line To: header (rfc822)' '

	git format-patch --to="R. E. Cipient <rcipient@example.com>" --stdout master..side | sed -e "/^\$/q" >patch8 &&
	grep "^To: \"R. E. Cipient\" <rcipient@example.com>\$" patch8
'

test_expect_failure 'command line To: header (rfc2047)' '

	git format-patch --to="R Ä Cipient <rcipient@example.com>" --stdout master..side | sed -e "/^\$/q" >patch8 &&
	grep "^To: =?UTF-8?q?R=20=C3=84=20Cipient?= <rcipient@example.com>\$" patch8
'

test_expect_success 'configuration To: header (ascii)' '

	git config format.to "R E Cipient <rcipient@example.com>" &&
	git format-patch --stdout master..side | sed -e "/^\$/q" >patch9 &&
	grep "^To: R E Cipient <rcipient@example.com>\$" patch9
'

test_expect_failure 'configuration To: header (rfc822)' '

	git config format.to "R. E. Cipient <rcipient@example.com>" &&
	git format-patch --stdout master..side | sed -e "/^\$/q" >patch9 &&
	grep "^To: \"R. E. Cipient\" <rcipient@example.com>\$" patch9
'

test_expect_failure 'configuration To: header (rfc2047)' '

	git config format.to "R Ä Cipient <rcipient@example.com>" &&
	git format-patch --stdout master..side | sed -e "/^\$/q" >patch9 &&
	grep "^To: =?UTF-8?q?R=20=C3=84=20Cipient?= <rcipient@example.com>\$" patch9
'

# check_patch <patch>: Verify that <patch> looks like a half-sane
# patch email to avoid a false positive with !grep
proc check_patch {
	# Succeed only when the file named by $1 carries all three mail
	# headers that every format-patch output must have; the && chain
	# propagates the first failing grep's status to the caller.
	grep -e "^From:" $1 &&
	grep -e "^Date:" $1 &&
	grep -e "^Subject:" $1
}

test_expect_success 'format.from=false' '

	git -c format.from=false format-patch --stdout master..side |
	sed -e "/^\$/q" >patch &&
	check_patch patch &&
	! grep "^From: C O Mitter <committer@example.com>\$" patch
'

test_expect_success 'format.from=true' '

	git -c format.from=true format-patch --stdout master..side |
	sed -e "/^\$/q" >patch &&
	check_patch patch &&
	grep "^From: C O Mitter <committer@example.com>\$" patch
'

test_expect_success 'format.from with address' '

	git -c format.from="F R Om <from@example.com>" format-patch --stdout master..side |
	sed -e "/^\$/q" >patch &&
	check_patch patch &&
	grep "^From: F R Om <from@example.com>\$" patch
'

test_expect_success '--no-from overrides format.from' '

	git -c format.from="F R Om <from@example.com>" format-patch --no-from --stdout master..side |
	sed -e "/^\$/q" >patch &&
	check_patch patch &&
	! grep "^From: F R Om <from@example.com>\$" patch
'

test_expect_success '--from overrides format.from' '

	git -c format.from="F R Om <from@example.com>" format-patch --from --stdout master..side |
	sed -e "/^\$/q" >patch &&
	check_patch patch &&
	! grep "^From: F R Om <from@example.com>\$" patch
'

test_expect_success '--no-to overrides config.to' '

	git config --replace-all format.to \
		"R E Cipient <rcipient@example.com>" &&
	git format-patch --no-to --stdout master..side |
	sed -e "/^\$/q" >patch10 &&
	check_patch patch10 &&
	! grep "^To: R E Cipient <rcipient@example.com>\$" patch10
'

test_expect_success '--no-to and --to replaces config.to' '

	git config --replace-all format.to \
		"Someone <someone@out.there>" &&
	git format-patch --no-to --to="Someone Else <else@out.there>" \
		--stdout master..side |
	sed -e "/^\$/q" >patch11 &&
	check_patch patch11 &&
	! grep "^To: Someone <someone@out.there>\$" patch11 &&
	grep "^To: Someone Else <else@out.there>\$" patch11
'

test_expect_success '--no-cc overrides config.cc' '

	git config --replace-all format.cc \
		"C E Cipient <rcipient@example.com>" &&
	git format-patch --no-cc --stdout master..side |
	sed -e "/^\$/q" >patch12 &&
	check_patch patch12 &&
	! grep "^Cc: C E Cipient <rcipient@example.com>\$" patch12
'

test_expect_success '--no-add-header overrides config.headers' '

	git config --replace-all format.headers \
		"Header1: B E Cipient <rcipient@example.com>" &&
	git format-patch --no-add-header --stdout master..side |
	sed -e "/^\$/q" >patch13 &&
	check_patch patch13 &&
	! grep "^Header1: B E Cipient <rcipient@example.com>\$" patch13
'

test_expect_success 'multiple files' '

	rm -rf patches/ &&
	git checkout side &&
	git format-patch -o patches/ master &&
	ls patches/0001-Side-changes-1.patch patches/0002-Side-changes-2.patch patches/0003-Side-changes-3-with-n-backslash-n-in-it.patch
'

test_expect_success 'reroll count' '
	rm -fr patches &&
	git format-patch -o patches --cover-letter --reroll-count 4 master..side >list &&
	! grep -v "^patches/v4-000[0-3]-" list &&
	sed -n -e "/^Subject: /p" $(cat list) >subjects &&
	! grep -v "^Subject: \[PATCH v4 [0-3]/3\] " subjects
'

test_expect_success 'reroll count (-v)' '
	rm -fr patches &&
	git format-patch -o patches --cover-letter -v4 master..side >list &&
	! grep -v "^patches/v4-000[0-3]-" list &&
	sed -n -e "/^Subject: /p" $(cat list) >subjects &&
	! grep -v "^Subject: \[PATCH v4 [0-3]/3\] " subjects
'

proc check_threading {
	# check_threading <expect-file> <format-patch args...>:
	# Run "git format-patch --stdout" with the remaining arguments,
	# normalize its threading headers, and compare against <expect-file>.
	setvar expect = "$1" &&
	shift &&
	# format-patch runs on the upstream side of a pipe, so its exit
	# status would otherwise be masked; capture it in status.out and
	# check it explicitly below.
	shell {git format-patch --stdout @ARGV; echo $? > status.out} |
	# Prints everything between the Message-ID and In-Reply-To,
	# and replaces all Message-ID-lookalikes by a sequence number
	# (so expected files can use stable <0>, <1>, ... placeholders).
	perl -ne '
		if (/^(message-id|references|in-reply-to)/i) {
			$printing = 1;
		} elsif (/^\S/) {
			$printing = 0;
		}
		if ($printing) {
			$h{$1}=$i++ if (/<([^>]+)>/ and !exists $h{$1});
			for $k (keys %h) {s/$k/$h{$k}/};
			print;
		}
		print "---\n" if /^From /i;
	' > actual &&
	# Fail if format-patch itself failed, then diff the normalized output.
	test 0 = $(cat status.out) &&
	test_cmp $expect actual
}

cat >> expect.no-threading <<< """
---
---
---
"""

test_expect_success 'no threading' '
	git checkout side &&
	check_threading expect.no-threading master
'

cat > expect.thread <<< """
---
Message-Id: <0>
---
Message-Id: <1>
In-Reply-To: <0>
References: <0>
---
Message-Id: <2>
In-Reply-To: <0>
References: <0>
"""

test_expect_success 'thread' '
	check_threading expect.thread --thread master
'

cat > expect.in-reply-to <<< """
---
Message-Id: <0>
In-Reply-To: <1>
References: <1>
---
Message-Id: <2>
In-Reply-To: <1>
References: <1>
---
Message-Id: <3>
In-Reply-To: <1>
References: <1>
"""

test_expect_success 'thread in-reply-to' '
	check_threading expect.in-reply-to --in-reply-to="<test.message>" \
		--thread master
'

cat > expect.cover-letter <<< """
---
Message-Id: <0>
---
Message-Id: <1>
In-Reply-To: <0>
References: <0>
---
Message-Id: <2>
In-Reply-To: <0>
References: <0>
---
Message-Id: <3>
In-Reply-To: <0>
References: <0>
"""

test_expect_success 'thread cover-letter' '
	check_threading expect.cover-letter --cover-letter --thread master
'

cat > expect.cl-irt <<< """
---
Message-Id: <0>
In-Reply-To: <1>
References: <1>
---
Message-Id: <2>
In-Reply-To: <0>
References: <1>
	<0>
---
Message-Id: <3>
In-Reply-To: <0>
References: <1>
	<0>
---
Message-Id: <4>
In-Reply-To: <0>
References: <1>
	<0>
"""

test_expect_success 'thread cover-letter in-reply-to' '
	check_threading expect.cl-irt --cover-letter \
		--in-reply-to="<test.message>" --thread master
'

test_expect_success 'thread explicit shallow' '
	check_threading expect.cl-irt --cover-letter \
		--in-reply-to="<test.message>" --thread=shallow master
'

cat > expect.deep <<< """
---
Message-Id: <0>
---
Message-Id: <1>
In-Reply-To: <0>
References: <0>
---
Message-Id: <2>
In-Reply-To: <1>
References: <0>
	<1>
"""

test_expect_success 'thread deep' '
	check_threading expect.deep --thread=deep master
'

cat > expect.deep-irt <<< """
---
Message-Id: <0>
In-Reply-To: <1>
References: <1>
---
Message-Id: <2>
In-Reply-To: <0>
References: <1>
	<0>
---
Message-Id: <3>
In-Reply-To: <2>
References: <1>
	<0>
	<2>
"""

test_expect_success 'thread deep in-reply-to' '
	check_threading expect.deep-irt  --thread=deep \
		--in-reply-to="<test.message>" master
'

cat > expect.deep-cl <<< """
---
Message-Id: <0>
---
Message-Id: <1>
In-Reply-To: <0>
References: <0>
---
Message-Id: <2>
In-Reply-To: <1>
References: <0>
	<1>
---
Message-Id: <3>
In-Reply-To: <2>
References: <0>
	<1>
	<2>
"""

test_expect_success 'thread deep cover-letter' '
	check_threading expect.deep-cl --cover-letter --thread=deep master
'

cat > expect.deep-cl-irt <<< """
---
Message-Id: <0>
In-Reply-To: <1>
References: <1>
---
Message-Id: <2>
In-Reply-To: <0>
References: <1>
	<0>
---
Message-Id: <3>
In-Reply-To: <2>
References: <1>
	<0>
	<2>
---
Message-Id: <4>
In-Reply-To: <3>
References: <1>
	<0>
	<2>
	<3>
"""

test_expect_success 'thread deep cover-letter in-reply-to' '
	check_threading expect.deep-cl-irt --cover-letter \
		--in-reply-to="<test.message>" --thread=deep master
'

test_expect_success 'thread via config' '
	test_config format.thread true &&
	check_threading expect.thread master
'

test_expect_success 'thread deep via config' '
	test_config format.thread deep &&
	check_threading expect.deep master
'

test_expect_success 'thread config + override' '
	test_config format.thread deep &&
	check_threading expect.thread --thread master
'

test_expect_success 'thread config + --no-thread' '
	test_config format.thread deep &&
	check_threading expect.no-threading --no-thread master
'

test_expect_success 'excessive subject' '

	rm -rf patches/ &&
	git checkout side &&
	for i in 5 6 1 2 3 A 4 B C 7 8 9 10 D E F; do echo "$i"; done >>file &&
	git update-index file &&
	git commit -m "This is an excessively long subject line for a message due to the habit some projects have of not having a short, one-line subject at the start of the commit message, but rather sticking a whole paragraph right at the start as the only thing in the commit message. It had better not become the filename for the patch." &&
	git format-patch -o patches/ master..side &&
	ls patches/0004-This-is-an-excessively-long-subject-line-for-a-messa.patch
'

test_expect_success 'cover-letter inherits diff options' '

	git mv file foo &&
	git commit -m foo &&
	git format-patch --no-renames --cover-letter -1 &&
	check_patch 0000-cover-letter.patch &&
	! grep "file => foo .* 0 *\$" 0000-cover-letter.patch &&
	git format-patch --cover-letter -1 -M &&
	grep "file => foo .* 0 *\$" 0000-cover-letter.patch

'

cat > expect <<< """
  This is an excessively long subject line for a message due to the
    habit some projects have of not having a short, one-line subject at
    the start of the commit message, but rather sticking a whole
    paragraph right at the start as the only thing in the commit
    message. It had better not become the filename for the patch.
  foo

"""

test_expect_success 'shortlog of cover-letter wraps overly-long onelines' '

	git format-patch --cover-letter -2 &&
	sed -e "1,/A U Thor/d" -e "/^\$/q" < 0000-cover-letter.patch > output &&
	test_cmp expect output

'

cat > expect <<< """
index 40f36c6..2dc5c23 100644
--- a/file
+++ b/file
@@ -13,4 +13,20 @@ C
 10
 D
 E
 F
+5
"""

test_expect_success 'format-patch respects -U' '

	git format-patch -U4 -2 &&
	sed -e "1,/^diff/d" -e "/^+5/q" \
		<0001-This-is-an-excessively-long-subject-line-for-a-messa.patch \
		>output &&
	test_cmp expect output

'

cat > expect <<< """

diff --git a/file b/file
index 40f36c6..2dc5c23 100644
--- a/file
+++ b/file
@@ -14,3 +14,19 @@ C
 D
 E
 F
+5
"""

test_expect_success 'format-patch -p suppresses stat' '

	git format-patch -p -2 &&
	sed -e "1,/^\$/d" -e "/^+5/q" < 0001-This-is-an-excessively-long-subject-line-for-a-messa.patch > output &&
	test_cmp expect output

'

test_expect_success 'format-patch from a subdirectory (1)' '
	filename=$(
		rm -rf sub &&
		mkdir -p sub/dir &&
		cd sub/dir &&
		git format-patch -1
	) &&
	case "$filename" in
	0*)
		;; # ok
	*)
		echo "Oops? $filename"
		false
		;;
	esac &&
	test -f "$filename"
'

test_expect_success 'format-patch from a subdirectory (2)' '
	filename=$(
		rm -rf sub &&
		mkdir -p sub/dir &&
		cd sub/dir &&
		git format-patch -1 -o ..
	) &&
	case "$filename" in
	../0*)
		;; # ok
	*)
		echo "Oops? $filename"
		false
		;;
	esac &&
	basename=$(expr "$filename" : ".*/\(.*\)") &&
	test -f "sub/$basename"
'

test_expect_success 'format-patch from a subdirectory (3)' '
	rm -f 0* &&
	filename=$(
		rm -rf sub &&
		mkdir -p sub/dir &&
		cd sub/dir &&
		git format-patch -1 -o "$TRASH_DIRECTORY"
	) &&
	basename=$(expr "$filename" : ".*/\(.*\)") &&
	test -f "$basename"
'

test_expect_success 'format-patch --in-reply-to' '
	git format-patch -1 --stdout --in-reply-to "baz@foo.bar" > patch8 &&
	grep "^In-Reply-To: <baz@foo.bar>" patch8 &&
	grep "^References: <baz@foo.bar>" patch8
'

test_expect_success 'format-patch --signoff' '
	git format-patch -1 --signoff --stdout >out &&
	grep "^Signed-off-by: $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>" out
'

test_expect_success 'format-patch --notes --signoff' '
	git notes --ref test add -m "test message" HEAD &&
	git format-patch -1 --signoff --stdout --notes=test >out &&
	# Three dashes must come after S-o-b
	! sed "/^Signed-off-by: /q" out | grep "test message" &&
	sed "1,/^Signed-off-by: /d" out | grep "test message" &&
	# Notes message must come after three dashes
	! sed "/^---$/q" out | grep "test message" &&
	sed "1,/^---$/d" out | grep "test message"
'

echo "fatal: --name-only does not make sense" > expect.name-only
echo "fatal: --name-status does not make sense" > expect.name-status
echo "fatal: --check does not make sense" > expect.check

test_expect_success 'options no longer allowed for format-patch' '
	test_must_fail git format-patch --name-only 2> output &&
	test_i18ncmp expect.name-only output &&
	test_must_fail git format-patch --name-status 2> output &&
	test_i18ncmp expect.name-status output &&
	test_must_fail git format-patch --check 2> output &&
	test_i18ncmp expect.check output'

test_expect_success 'format-patch --numstat should produce a patch' '
	git format-patch --numstat --stdout master..side > output &&
	test 5 = $(grep "^diff --git a/" output | wc -l)'

test_expect_success 'format-patch -- <path>' '
	git format-patch master..side -- file 2>error &&
	! grep "Use .--" error
'

test_expect_success 'format-patch --ignore-if-in-upstream HEAD' '
	git format-patch --ignore-if-in-upstream HEAD
'

setvar git_version = "$(git --version | sed "s/.* //")"

proc signature {
	printf "%s\n%s\n\n" "-- " ${1:-$git_version}
}

test_expect_success 'format-patch default signature' '
	git format-patch --stdout -1 | tail -n 3 >output &&
	signature >expect &&
	test_cmp expect output
'

test_expect_success 'format-patch --signature' '
	git format-patch --stdout --signature="my sig" -1 | tail -n 3 >output &&
	signature "my sig" >expect &&
	test_cmp expect output
'

test_expect_success 'format-patch with format.signature config' '
	git config format.signature "config sig" &&
	git format-patch --stdout -1 >output &&
	grep "config sig" output
'

test_expect_success 'format-patch --signature overrides format.signature' '
	git config format.signature "config sig" &&
	git format-patch --stdout --signature="overrides" -1 >output &&
	! grep "config sig" output &&
	grep "overrides" output
'

test_expect_success 'format-patch --no-signature ignores format.signature' '
	git config format.signature "config sig" &&
	git format-patch --stdout --signature="my sig" --no-signature \
		-1 >output &&
	check_patch output &&
	! grep "config sig" output &&
	! grep "my sig" output &&
	! grep "^-- \$" output
'

test_expect_success 'format-patch --signature --cover-letter' '
	git config --unset-all format.signature &&
	git format-patch --stdout --signature="my sig" --cover-letter \
		-1 >output &&
	grep "my sig" output &&
	test 2 = $(grep "my sig" output | wc -l)
'

test_expect_success 'format.signature="" suppresses signatures' '
	git config format.signature "" &&
	git format-patch --stdout -1 >output &&
	check_patch output &&
	! grep "^-- \$" output
'

test_expect_success 'format-patch --no-signature suppresses signatures' '
	git config --unset-all format.signature &&
	git format-patch --stdout --no-signature -1 >output &&
	check_patch output &&
	! grep "^-- \$" output
'

test_expect_success 'format-patch --signature="" suppresses signatures' '
	git format-patch --stdout --signature="" -1 >output &&
	check_patch output &&
	! grep "^-- \$" output
'

test_expect_success 'prepare mail-signature input' '
	cat >mail-signature <<-\EOF

	Test User <test.email@kernel.org>
	http://git.kernel.org/cgit/git/git.git

	git.kernel.org/?p=git/git.git;a=summary

	EOF
'

test_expect_success '--signature-file=file works' '
	git format-patch --stdout --signature-file=mail-signature -1 >output &&
	check_patch output &&
	sed -e "1,/^-- \$/d" <output >actual &&
	{
		cat mail-signature && echo
	} >expect &&
	test_cmp expect actual
'

test_expect_success 'format.signaturefile works' '
	test_config format.signaturefile mail-signature &&
	git format-patch --stdout -1 >output &&
	check_patch output &&
	sed -e "1,/^-- \$/d" <output >actual &&
	{
		cat mail-signature && echo
	} >expect &&
	test_cmp expect actual
'

test_expect_success '--no-signature suppresses format.signaturefile ' '
	test_config format.signaturefile mail-signature &&
	git format-patch --stdout --no-signature -1 >output &&
	check_patch output &&
	! grep "^-- \$" output
'

test_expect_success '--signature-file overrides format.signaturefile' '
	cat >other-mail-signature <<-\EOF &&
	Use this other signature instead of mail-signature.
	EOF
	test_config format.signaturefile mail-signature &&
	git format-patch --stdout \
			--signature-file=other-mail-signature -1 >output &&
	check_patch output &&
	sed -e "1,/^-- \$/d" <output >actual &&
	{
		cat other-mail-signature && echo
	} >expect &&
	test_cmp expect actual
'

test_expect_success '--signature overrides format.signaturefile' '
	test_config format.signaturefile mail-signature &&
	git format-patch --stdout --signature="my sig" -1 >output &&
	check_patch output &&
	grep "my sig" output
'

test_expect_success TTY 'format-patch --stdout paginates' '
	rm -f pager_used &&
	test_terminal env GIT_PAGER="wc >pager_used" git format-patch --stdout --all &&
	test_path_is_file pager_used
'

 test_expect_success TTY 'format-patch --stdout pagination can be disabled' '
	rm -f pager_used &&
	test_terminal env GIT_PAGER="wc >pager_used" git --no-pager format-patch --stdout --all &&
	test_terminal env GIT_PAGER="wc >pager_used" git -c "pager.format-patch=false" format-patch --stdout --all &&
	test_path_is_missing pager_used &&
	test_path_is_missing .git/pager_used
'

test_expect_success 'format-patch handles multi-line subjects' '
	rm -rf patches/ &&
	echo content >>file &&
	for i in one two three; do echo $i; done >msg &&
	git add file &&
	git commit -F msg &&
	git format-patch -o patches -1 &&
	grep ^Subject: patches/0001-one.patch >actual &&
	echo "Subject: [PATCH] one two three" >expect &&
	test_cmp expect actual
'

test_expect_success 'format-patch handles multi-line encoded subjects' '
	rm -rf patches/ &&
	echo content >>file &&
	for i in en två tre; do echo $i; done >msg &&
	git add file &&
	git commit -F msg &&
	git format-patch -o patches -1 &&
	grep ^Subject: patches/0001-en.patch >actual &&
	echo "Subject: [PATCH] =?UTF-8?q?en=20tv=C3=A5=20tre?=" >expect &&
	test_cmp expect actual
'

setvar M8 = ""foo bar ""
setvar M64 = "$M8$M8$M8$M8$M8$M8$M8$M8"
setvar M512 = "$M64$M64$M64$M64$M64$M64$M64$M64"
cat >expect <<< '''
Subject: [PATCH] foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo
 bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar
 foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo
 bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar
 foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo
 bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar
 foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar foo bar
'''
test_expect_success 'format-patch wraps extremely long subject (ascii)' '
	echo content >>file &&
	git add file &&
	git commit -m "$M512" &&
	git format-patch --stdout -1 >patch &&
	sed -n "/^Subject/p; /^ /p; /^$/q" <patch >subject &&
	test_cmp expect subject
'

setvar M8 = ""föö bar ""
setvar M64 = "$M8$M8$M8$M8$M8$M8$M8$M8"
setvar M512 = "$M64$M64$M64$M64$M64$M64$M64$M64"
cat >expect <<< '''
Subject: [PATCH] =?UTF-8?q?f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f?=
 =?UTF-8?q?=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar?=
 =?UTF-8?q?=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20?=
 =?UTF-8?q?bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6?=
 =?UTF-8?q?=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6?=
 =?UTF-8?q?=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f?=
 =?UTF-8?q?=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar?=
 =?UTF-8?q?=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20?=
 =?UTF-8?q?bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6?=
 =?UTF-8?q?=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6?=
 =?UTF-8?q?=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f?=
 =?UTF-8?q?=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar?=
 =?UTF-8?q?=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20?=
 =?UTF-8?q?bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6?=
 =?UTF-8?q?=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6?=
 =?UTF-8?q?=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f?=
 =?UTF-8?q?=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar?=
 =?UTF-8?q?=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20?=
 =?UTF-8?q?bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6?=
 =?UTF-8?q?=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6?=
 =?UTF-8?q?=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f?=
 =?UTF-8?q?=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar?=
 =?UTF-8?q?=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20bar=20f=C3=B6=C3=B6=20?=
 =?UTF-8?q?bar?=
'''
test_expect_success 'format-patch wraps extremely long subject (rfc2047)' '
	rm -rf patches/ &&
	echo content >>file &&
	git add file &&
	git commit -m "$M512" &&
	git format-patch --stdout -1 >patch &&
	sed -n "/^Subject/p; /^ /p; /^$/q" <patch >subject &&
	test_cmp expect subject
'

# check_author NAME
# Commit a change to "file" with GIT_AUTHOR_NAME set to NAME, then check
# that the From: header of the formatted patch (including any folded
# continuation lines) matches the prepared "expect" file.
#
# Fix: the original had stray double quotes ('git add file" &&' and
# 'GIT_AUTHOR_NAME=$1"') — the quoting of "$1" was mangled across the
# line break; restored to the intended GIT_AUTHOR_NAME="$1".
proc check_author {
	echo content >>file &&
	git add file &&
	GIT_AUTHOR_NAME="$1" git commit -m author-check &&
	git format-patch --stdout -1 >patch &&
	# Keep From:, its continuation lines, and stop at the first blank line.
	sed -n "/^From: /p; /^ /p; /^$/q" <patch >actual &&
	test_cmp expect actual
}

cat >expect <<< '''
From: "Foo B. Bar" <author@example.com>
'''
test_expect_success 'format-patch quotes dot in from-headers' '
	check_author "Foo B. Bar"
'

cat >expect <<< '''
From: "Foo \"The Baz\" Bar" <author@example.com>
'''
test_expect_success 'format-patch quotes double-quote in from-headers' '
	check_author "Foo \"The Baz\" Bar"
'

cat >expect <<< '''
From: =?UTF-8?q?F=C3=B6o=20Bar?= <author@example.com>
'''
test_expect_success 'format-patch uses rfc2047-encoded from-headers when necessary' '
	check_author "Föo Bar"
'

cat >expect <<< '''
From: =?UTF-8?q?F=C3=B6o=20B=2E=20Bar?= <author@example.com>
'''
test_expect_success 'rfc2047-encoded from-headers leave no rfc822 specials' '
	check_author "Föo B. Bar"
'

cat >expect <<< """
From: foo_bar_foo_bar_foo_bar_foo_bar_foo_bar_foo_bar_foo_bar_foo_bar_
 <author@example.com>
"""
test_expect_success 'format-patch wraps moderately long from-header (ascii)' '
	check_author "foo_bar_foo_bar_foo_bar_foo_bar_foo_bar_foo_bar_foo_bar_foo_bar_"
'

cat >expect <<< '''
From: Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar
 Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo
 Bar Foo Bar Foo Bar Foo Bar <author@example.com>
'''
test_expect_success 'format-patch wraps extremely long from-header (ascii)' '
	check_author "Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar"
'

cat >expect <<< '''
From: "Foo.Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar
 Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo
 Bar Foo Bar Foo Bar Foo Bar" <author@example.com>
'''
test_expect_success 'format-patch wraps extremely long from-header (rfc822)' '
	check_author "Foo.Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar"
'

cat >expect <<< '''
From: =?UTF-8?q?Fo=C3=B6=20Bar=20Foo=20Bar=20Foo=20Bar=20Foo=20Bar=20Foo?=
 =?UTF-8?q?=20Bar=20Foo=20Bar=20Foo=20Bar=20Foo=20Bar=20Foo=20Bar=20Foo=20?=
 =?UTF-8?q?Bar=20Foo=20Bar=20Foo=20Bar=20Foo=20Bar=20Foo=20Bar=20Foo=20Bar?=
 =?UTF-8?q?=20Foo=20Bar=20Foo=20Bar=20Foo=20Bar=20Foo=20Bar=20Foo=20Bar=20?=
 =?UTF-8?q?Foo=20Bar=20Foo=20Bar?= <author@example.com>
'''
test_expect_success 'format-patch wraps extremely long from-header (rfc2047)' '
	check_author "Foö Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar Foo Bar"
'

cat >expect <<< '''
Subject: header with . in it
'''
test_expect_success 'subject lines do not have 822 atom-quoting' '
	echo content >>file &&
	git add file &&
	git commit -m "header with . in it" &&
	git format-patch -k -1 --stdout >patch &&
	grep ^Subject: patch >actual &&
	test_cmp expect actual
'

cat >expect <<< '''
Subject: [PREFIX 1/1] header with . in it
'''
test_expect_success 'subject prefixes have space prepended' '
	git format-patch -n -1 --stdout --subject-prefix=PREFIX >patch &&
	grep ^Subject: patch >actual &&
	test_cmp expect actual
'

cat >expect <<< '''
Subject: [1/1] header with . in it
'''
test_expect_success 'empty subject prefix does not have extra space' '
	git format-patch -n -1 --stdout --subject-prefix= >patch &&
	grep ^Subject: patch >actual &&
	test_cmp expect actual
'

test_expect_success '--rfc' '
	cat >expect <<-\EOF &&
	Subject: [RFC PATCH 1/1] header with . in it
	EOF
	git format-patch -n -1 --stdout --rfc >patch &&
	grep ^Subject: patch >actual &&
	test_cmp expect actual
'

test_expect_success '--from=ident notices bogus ident' '
	test_must_fail git format-patch -1 --stdout --from=foo >patch
'

test_expect_success '--from=ident replaces author' '
	git format-patch -1 --stdout --from="Me <me@example.com>" >patch &&
	cat >expect <<-\EOF &&
	From: Me <me@example.com>

	From: A U Thor <author@example.com>

	EOF
	sed -ne "/^From:/p; /^$/p; /^---$/q" <patch >patch.head &&
	test_cmp expect patch.head
'

test_expect_success '--from uses committer ident' '
	git format-patch -1 --stdout --from >patch &&
	cat >expect <<-\EOF &&
	From: C O Mitter <committer@example.com>

	From: A U Thor <author@example.com>

	EOF
	sed -ne "/^From:/p; /^$/p; /^---$/q" <patch >patch.head &&
	test_cmp expect patch.head
'

test_expect_success '--from omits redundant in-body header' '
	git format-patch -1 --stdout --from="A U Thor <author@example.com>" >patch &&
	cat >expect <<-\EOF &&
	From: A U Thor <author@example.com>

	EOF
	sed -ne "/^From:/p; /^$/p; /^---$/q" <patch >patch.head &&
	test_cmp expect patch.head
'

test_expect_success 'in-body headers trigger content encoding' '
	test_env GIT_AUTHOR_NAME="éxötìc" test_commit exotic &&
	test_when_finished "git reset --hard HEAD^" &&
	git format-patch -1 --stdout --from >patch &&
	cat >expect <<-\EOF &&
	From: C O Mitter <committer@example.com>
	Content-Type: text/plain; charset=UTF-8

	From: éxötìc <author@example.com>

	EOF
	sed -ne "/^From:/p; /^$/p; /^Content-Type/p; /^---$/q" <patch >patch.head &&
	test_cmp expect patch.head
'

# append_signoff
# Create a throwaway commit reusing HEAD's tree, with the commit message
# read from this proc's stdin (git commit-tree reads the message there),
# then format it with --signoff and print line-numbered Subject /
# Signed-off-by / blank lines up to the "---" separator so callers can
# assert exact line positions.
proc append_signoff {
	setvar C = $(git commit-tree HEAD^^{tree} -p HEAD) &&
	git format-patch --stdout --signoff $C^..$C >append_signoff.patch &&
	# Restrict to the header region (through "---"), numbering only the
	# lines the signoff tests care about.
	sed -n -e "1,/^---$/p" append_signoff.patch |
		egrep -n "^Subject|Sign|^$"
}

test_expect_success 'signoff: commit with no body' '
	append_signoff </dev/null >actual &&
	cat <<\EOF | sed "s/EOL$//" >expected &&
4:Subject: [PATCH] EOL
8:
9:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: commit with only subject' '
	echo subject | append_signoff >actual &&
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
9:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: commit with only subject that does not end with NL' '
	printf subject | append_signoff >actual &&
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
9:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: no existing signoffs' '
	append_signoff <<\EOF >actual &&
subject

body
EOF
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
10:
11:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: no existing signoffs and no trailing NL' '
	printf "subject\n\nbody" | append_signoff >actual &&
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
10:
11:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: some random signoff' '
	append_signoff <<\EOF >actual &&
subject

body

Signed-off-by: my@house
EOF
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
10:
11:Signed-off-by: my@house
12:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: misc conforming footer elements' '
	append_signoff <<\EOF >actual &&
subject

body

Signed-off-by: my@house
(cherry picked from commit da39a3ee5e6b4b0d3255bfef95601890afd80709)
Tested-by: Some One <someone@example.com>
Bug: 1234
EOF
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
10:
11:Signed-off-by: my@house
15:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: some random signoff-alike' '
	append_signoff <<\EOF >actual &&
subject

body
Fooled-by-me: my@house
EOF
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
11:
12:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: not really a signoff' '
	append_signoff <<\EOF >actual &&
subject

I want to mention about Signed-off-by: here.
EOF
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
9:I want to mention about Signed-off-by: here.
10:
11:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: not really a signoff (2)' '
	append_signoff <<\EOF >actual &&
subject

My unfortunate
Signed-off-by: example happens to be wrapped here.
EOF
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
10:Signed-off-by: example happens to be wrapped here.
11:
12:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: valid S-o-b paragraph in the middle' '
	append_signoff <<\EOF >actual &&
subject

Signed-off-by: my@house
Signed-off-by: your@house

A lot of houses.
EOF
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
9:Signed-off-by: my@house
10:Signed-off-by: your@house
11:
13:
14:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: the same signoff at the end' '
	append_signoff <<\EOF >actual &&
subject

body

Signed-off-by: C O Mitter <committer@example.com>
EOF
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
10:
11:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: the same signoff at the end, no trailing NL' '
	printf "subject\n\nSigned-off-by: C O Mitter <committer@example.com>" |
		append_signoff >actual &&
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
9:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: the same signoff NOT at the end' '
	append_signoff <<\EOF >actual &&
subject

body

Signed-off-by: C O Mitter <committer@example.com>
Signed-off-by: my@house
EOF
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
10:
11:Signed-off-by: C O Mitter <committer@example.com>
12:Signed-off-by: my@house
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: detect garbage in non-conforming footer' '
	append_signoff <<\EOF >actual &&
subject

body

Tested-by: my@house
Some Trash
Signed-off-by: C O Mitter <committer@example.com>
EOF
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
10:
13:Signed-off-by: C O Mitter <committer@example.com>
14:
15:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'signoff: footer begins with non-signoff without @ sign' '
	append_signoff <<\EOF >actual &&
subject

body

Reviewed-id: Noone
Tested-by: my@house
Change-id: Ideadbeef
Signed-off-by: C O Mitter <committer@example.com>
Bug: 1234
EOF
	cat >expected <<\EOF &&
4:Subject: [PATCH] subject
8:
10:
14:Signed-off-by: C O Mitter <committer@example.com>
EOF
	test_cmp expected actual
'

test_expect_success 'format patch ignores color.ui' '
	test_unconfig color.ui &&
	git format-patch --stdout -1 >expect &&
	test_config color.ui always &&
	git format-patch --stdout -1 >actual &&
	test_cmp expect actual
'

test_expect_success 'cover letter using branch description (1)' '
	git checkout rebuild-1 &&
	test_config branch.rebuild-1.description hello &&
	git format-patch --stdout --cover-letter master >actual &&
	grep hello actual >/dev/null
'

test_expect_success 'cover letter using branch description (2)' '
	git checkout rebuild-1 &&
	test_config branch.rebuild-1.description hello &&
	git format-patch --stdout --cover-letter rebuild-1~2..rebuild-1 >actual &&
	grep hello actual >/dev/null
'

test_expect_success 'cover letter using branch description (3)' '
	git checkout rebuild-1 &&
	test_config branch.rebuild-1.description hello &&
	git format-patch --stdout --cover-letter ^master rebuild-1 >actual &&
	grep hello actual >/dev/null
'

test_expect_success 'cover letter using branch description (4)' '
	git checkout rebuild-1 &&
	test_config branch.rebuild-1.description hello &&
	git format-patch --stdout --cover-letter master.. >actual &&
	grep hello actual >/dev/null
'

test_expect_success 'cover letter using branch description (5)' '
	git checkout rebuild-1 &&
	test_config branch.rebuild-1.description hello &&
	git format-patch --stdout --cover-letter -2 HEAD >actual &&
	grep hello actual >/dev/null
'

test_expect_success 'cover letter using branch description (6)' '
	git checkout rebuild-1 &&
	test_config branch.rebuild-1.description hello &&
	git format-patch --stdout --cover-letter -2 >actual &&
	grep hello actual >/dev/null
'

test_expect_success 'cover letter with nothing' '
	git format-patch --stdout --cover-letter >actual &&
	test_line_count = 0 actual
'

test_expect_success 'cover letter auto' '
	mkdir -p tmp &&
	test_when_finished "rm -rf tmp;
		git config --unset format.coverletter" &&

	git config format.coverletter auto &&
	git format-patch -o tmp -1 >list &&
	test_line_count = 1 list &&
	git format-patch -o tmp -2 >list &&
	test_line_count = 3 list
'

test_expect_success 'cover letter auto user override' '
	mkdir -p tmp &&
	test_when_finished "rm -rf tmp;
		git config --unset format.coverletter" &&

	git config format.coverletter auto &&
	git format-patch -o tmp --cover-letter -1 >list &&
	test_line_count = 2 list &&
	git format-patch -o tmp --cover-letter -2 >list &&
	test_line_count = 3 list &&
	git format-patch -o tmp --no-cover-letter -1 >list &&
	test_line_count = 1 list &&
	git format-patch -o tmp --no-cover-letter -2 >list &&
	test_line_count = 2 list
'

test_expect_success 'format-patch --zero-commit' '
	git format-patch --zero-commit --stdout v2..v1 >patch2 &&
	grep "^From " patch2 | sort | uniq >actual &&
	echo "From $_z40 Mon Sep 17 00:00:00 2001" >expect &&
	test_cmp expect actual
'

test_expect_success 'From line has expected format' '
	git format-patch --stdout v2..v1 >patch2 &&
	grep "^From " patch2 >from &&
	grep "^From $_x40 Mon Sep 17 00:00:00 2001$" patch2 >filtered &&
	test_cmp from filtered
'

test_expect_success 'format-patch format.outputDirectory option' '
	test_config format.outputDirectory patches &&
	rm -fr patches &&
	git format-patch master..side &&
	test $(git rev-list master..side | wc -l) -eq $(ls patches | wc -l)
'

test_expect_success 'format-patch -o overrides format.outputDirectory' '
	test_config format.outputDirectory patches &&
	rm -fr patches patchset &&
	git format-patch master..side -o patchset &&
	test_path_is_missing patches &&
	test_path_is_dir patchset
'

test_expect_success 'format-patch --base' '
	git checkout side &&
	git format-patch --stdout --base=HEAD~3 -1 | tail -n 7 >actual &&
	echo >expected &&
	echo "base-commit: $(git rev-parse HEAD~3)" >>expected &&
	echo "prerequisite-patch-id: $(git show --patch HEAD~2 | git patch-id --stable | awk "{print \$1}")" >>expected &&
	echo "prerequisite-patch-id: $(git show --patch HEAD~1 | git patch-id --stable | awk "{print \$1}")" >>expected &&
	signature >> expected &&
	test_cmp expected actual
'

test_expect_success 'format-patch --base errors out when base commit is in revision list' '
	test_must_fail git format-patch --base=HEAD -2 &&
	test_must_fail git format-patch --base=HEAD~1 -2 &&
	git format-patch --stdout --base=HEAD~2 -2 >patch &&
	grep "^base-commit:" patch >actual &&
	echo "base-commit: $(git rev-parse HEAD~2)" >expected &&
	test_cmp expected actual
'

test_expect_success 'format-patch --base errors out when base commit is not ancestor of revision list' '
	# For history as below:
	#
	#    ---Q---P---Z---Y---*---X
	#	 \             /
	#	  ------------W
	#
	# If "format-patch Z..X" is given, P and Z can not be specified as the base commit
	git checkout -b topic1 master &&
	git rev-parse HEAD >commit-id-base &&
	test_commit P &&
	git rev-parse HEAD >commit-id-P &&
	test_commit Z &&
	git rev-parse HEAD >commit-id-Z &&
	test_commit Y &&
	git checkout -b topic2 master &&
	test_commit W &&
	git merge topic1 &&
	test_commit X &&
	test_must_fail git format-patch --base=$(cat commit-id-P) -3 &&
	test_must_fail git format-patch --base=$(cat commit-id-Z) -3 &&
	git format-patch --stdout --base=$(cat commit-id-base) -3 >patch &&
	grep "^base-commit:" patch >actual &&
	echo "base-commit: $(cat commit-id-base)" >expected &&
	test_cmp expected actual
'

test_expect_success 'format-patch --base=auto' '
	git checkout -b upstream master &&
	git checkout -b local upstream &&
	git branch --set-upstream-to=upstream &&
	test_commit N1 &&
	test_commit N2 &&
	git format-patch --stdout --base=auto -2 >patch &&
	grep "^base-commit:" patch >actual &&
	echo "base-commit: $(git rev-parse upstream)" >expected &&
	test_cmp expected actual
'

test_expect_success 'format-patch errors out when history involves criss-cross' '
	# setup criss-cross history
	#
	#   B---M1---D
	#  / \ /
	# A   X
	#  \ / \
	#   C---M2---E
	#
	git checkout master &&
	test_commit A &&
	git checkout -b xb master &&
	test_commit B &&
	git checkout -b xc master &&
	test_commit C &&
	git checkout -b xbc xb -- &&
	git merge xc &&
	git checkout -b xcb xc -- &&
	git branch --set-upstream-to=xbc &&
	git merge xb &&
	git checkout xbc &&
	test_commit D &&
	git checkout xcb &&
	test_commit E &&
	test_must_fail 	git format-patch --base=auto -1
'

test_expect_success 'format-patch format.useAutoBaseoption' '
	test_when_finished "git config --unset format.useAutoBase" &&
	git checkout local &&
	git config format.useAutoBase true &&
	git format-patch --stdout -1 >patch &&
	grep "^base-commit:" patch >actual &&
	echo "base-commit: $(git rev-parse upstream)" >expected &&
	test_cmp expected actual
'

test_expect_success 'format-patch --base overrides format.useAutoBase' '
	test_when_finished "git config --unset format.useAutoBase" &&
	git config format.useAutoBase true &&
	git format-patch --stdout --base=HEAD~1 -1 >patch &&
	grep "^base-commit:" patch >actual &&
	echo "base-commit: $(git rev-parse HEAD~1)" >expected &&
	test_cmp expected actual
'

test_expect_success 'format-patch --base with --attach' '
	git format-patch --attach=mimemime --stdout --base=HEAD~ -1 >patch &&
	sed -n -e "/^base-commit:/s/.*/1/p" -e "/^---*mimemime--$/s/.*/2/p" \
		patch >actual &&
	test_write_lines 1 2 >expect &&
	test_cmp expect actual
'

test_expect_success 'format-patch --pretty=mboxrd' '
	sp=" " &&
	cat >msg <<-INPUT_END &&
	mboxrd should escape the body

	From could trip up a loose mbox parser
	>From extra escape for reversibility
	>>From extra escape for reversibility 2
	from lower case not escaped
	Fromm bad speling not escaped
	 From with leading space not escaped

	F
	From
	From$sp
	From    $sp
	From	$sp
	INPUT_END

	cat >expect <<-INPUT_END &&
	>From could trip up a loose mbox parser
	>>From extra escape for reversibility
	>>>From extra escape for reversibility 2
	from lower case not escaped
	Fromm bad speling not escaped
	 From with leading space not escaped

	F
	From
	From
	From
	From
	INPUT_END

	C=$(git commit-tree HEAD^^{tree} -p HEAD <msg) &&
	git format-patch --pretty=mboxrd --stdout -1 $C~1..$C >patch &&
	git grep -h --no-index -A11 \
		"^>From could trip up a loose mbox parser" patch >actual &&
	test_cmp expect actual
'

test_done
    (DONE benchmarks/testdata/t4014-format-patch.sh)

# libtool (GNU libtool) 2.4.2
# Written by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996

# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006,
# 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
# This is free software; see the source for copying conditions.  There is NO
# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

# GNU Libtool is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# As a special exception to the GNU General Public License,
# if you distribute this file as part of a program or library that
# is built using GNU Libtool, you may include this file under the
# same distribution terms that you use for the rest of that program.
#
# GNU Libtool is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Libtool; see the file COPYING.  If not, a copy
# can be downloaded from http://www.gnu.org/licenses/gpl.html,
# or obtained by writing to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

# Usage: $progname [OPTION]... [MODE-ARG]...
#
# Provide generalized library-building support services.
#
#       --config             show all configuration variables
#       --debug              enable verbose shell tracing
#   -n, --dry-run            display commands without modifying any files
#       --features           display basic configuration information and exit
#       --mode=MODE          use operation mode MODE
#       --preserve-dup-deps  don't remove duplicate dependency libraries
#       --quiet, --silent    don't print informational messages
#       --no-quiet, --no-silent
#                            print informational messages (default)
#       --no-warn            don't display warning messages
#       --tag=TAG            use configuration variables from tag TAG
#   -v, --verbose            print more informational messages than default
#       --no-verbose         don't print the extra informational messages
#       --version            print version information
#   -h, --help, --help-all   print short, long, or detailed help message
#
# MODE must be one of the following:
#
#         clean              remove files from the build directory
#         compile            compile a source file into a libtool object
#         execute            automatically set library path, then run a program
#         finish             complete the installation of libtool libraries
#         install            install libraries or executables
#         link               create a library or an executable
#         uninstall          remove libraries from an installed directory
#
# MODE-ARGS vary depending on the MODE.  When passed as first option,
# `--mode=MODE' may be abbreviated as `MODE' or a unique abbreviation of that.
# Try `$progname --help --mode=MODE' for a more detailed description of MODE.
#
# When reporting a bug, please describe a test case to reproduce it and
# include the following information:
#
#         host-triplet:	$host
#         shell:		$SHELL
#         compiler:		$LTCC
#         compiler flags:		$LTCFLAGS
#         linker:		$LD (gnu? $with_gnu_ld)
#         $progname:	(GNU libtool) 2.4.2 Debian-2.4.2-1.7ubuntu1
#         automake:	$automake_version
#         autoconf:	$autoconf_version
#
# Report bugs to <bug-libtool@gnu.org>.
# GNU libtool home page: <http://www.gnu.org/software/libtool/>.
# General help using GNU software: <http://www.gnu.org/gethelp/>.

setvar PROGRAM = 'libtool'
setvar PACKAGE = 'libtool'
setvar VERSION = ""2.4.2 Debian-2.4.2-1.7ubuntu1""
setvar TIMESTAMP = """"
setvar package_revision = '1.3337'

# Be Bourne compatible
if test -n ${ZSH_VERSION+set} && shell {emulate sh} >/dev/null 2>&1 {
  emulate sh
  setvar NULLCMD = ':'
  # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
  # is contrary to our usage.  Disable this feature.
  alias -g '${1+"$@"}'='"$@"'
  setopt NO_GLOB_SUBST
} else {
  case{ *posix* { set -o posix} }
}
setvar BIN_SH = 'xpg4'; export BIN_SH # for Tru64
setvar DUALCASE = '1'; export DUALCASE # for MKS sh

# A function that is used when there is no print builtin or printf.
proc func_fallback_echo {
  # Emit $1 verbatim via eval + a quoted here-document, so no echo/printf
  # builtin is required; _LTECHO_EOF delimits the single-argument body.
  eval 'cat <<_LTECHO_EOF
$1
_LTECHO_EOF'
}

# NLS nuisances: We save the old values to restore during execute mode.
# Builds two eval-able strings: lt_user_locale (restore user's locale) and
# lt_safe_locale (force C locale), one clause per LC_* variable that was set.
setvar lt_user_locale = ''
setvar lt_safe_locale = ''
for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
{
  eval "if test \"\${$lt_var+set}\" = set; then
          save_$lt_var=\$$lt_var
          $lt_var=C
	  export $lt_var
	  lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\"
	  lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\"
	fi"
}
# Run the rest of this script in the C locale so sed/grep output is stable.
setvar LC_ALL = 'C'
setvar LANGUAGE = 'C'
export LANGUAGE LC_ALL

# NOTE(review): $lt_unset is not defined anywhere in this chunk; presumably
# set to `unset` (or `false`) by configure — verify in the surrounding file.
$lt_unset CDPATH


# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
# is ksh but when the shell is invoked as "sh" and the current value of
# the _XPG environment variable is not equal to 1 (one), the special
# positional parameter $0, within a function call, is the name of the
# function.
setvar progpath = "$0"



# Default external tools; `: ${VAR=default}` only assigns when VAR is unset,
# so the environment (or configure) can override each of these.
: ${CP="cp -f"}
test ${ECHO+set} = set || setvar ECHO = ${as_echo-'printf %s\n'}
: ${MAKE="make"}
: ${MKDIR="mkdir"}
: ${MV="mv -f"}
: ${RM="rm -f"}
: ${SHELL="${CONFIG_SHELL-/bin/sh}"}
: ${Xsed="$SED -e 1s/^X//"}

# Global variables:
setvar EXIT_SUCCESS = '0'
setvar EXIT_FAILURE = '1'
setvar EXIT_MISMATCH = '63'  # $? = 63 is used to indicate version mismatch to missing.
setvar EXIT_SKIP = '77'	  # $? = 77 is used to indicate a skipped test to automake.

setvar exit_status = "$EXIT_SUCCESS"

# Make sure IFS has a sensible default
# (lt_nl spans two source lines so its value is a literal newline).
setvar lt_nl = ''
''
setvar IFS = "" 	$lt_nl""

# Sed scripts implementing dirname(1) and basename(1) via s/// substitution.
setvar dirname = ""s,/[^/]*$,,""
setvar basename = ""s,^.*/,,""

# func_dirname file append nondir_replacement
# Compute the dirname of FILE.  If nonempty, add APPEND to the result,
# otherwise set result to NONDIR_REPLACEMENT.
# Result is stored in $func_dirname_result.  When FILE has no slash the
# sed script leaves it unchanged, which is how "no directory part" is
# detected below.
proc func_dirname {
    setvar func_dirname_result = $($ECHO ${1} | $SED $dirname)
    if test "X$func_dirname_result" = "X${1}" {
      setvar func_dirname_result = "${3}"
    } else {
      setvar func_dirname_result = ""$func_dirname_result${2}""
    }
} # func_dirname may be replaced by extended shell implementation


# func_basename file
# func_basename file
# Strip the directory part of FILE; result in $func_basename_result.
proc func_basename {
    setvar func_basename_result = $($ECHO ${1} | $SED $basename)
} # func_basename may be replaced by extended shell implementation


# func_dirname_and_basename file append nondir_replacement
# perform func_basename and func_dirname in a single function
# call:
#   dirname:  Compute the dirname of FILE.  If nonempty,
#             add APPEND to the result, otherwise set result
#             to NONDIR_REPLACEMENT.
#             value returned in "$func_dirname_result"
#   basename: Compute filename of FILE.
#             value retuned in "$func_basename_result"
# Implementation must be kept synchronized with func_dirname
# and func_basename. For efficiency, we do not delegate to
# those functions but instead duplicate the functionality here.
# Combined dirname+basename; see the header comment above for the contract.
# Duplicates func_dirname/func_basename for speed (two fewer function calls).
proc func_dirname_and_basename {
    # Extract subdirectory from the argument.
    setvar func_dirname_result = $($ECHO ${1} | $SED -e $dirname)
    if test "X$func_dirname_result" = "X${1}" {
      # No slash in FILE: use the nondir replacement ${3}.
      setvar func_dirname_result = "${3}"
    } else {
      setvar func_dirname_result = ""$func_dirname_result${2}""
    }
    setvar func_basename_result = $($ECHO ${1} | $SED -e $basename)
} # func_dirname_and_basename may be replaced by extended shell implementation


# func_stripname prefix suffix name
# strip PREFIX and SUFFIX off of NAME.
# PREFIX and SUFFIX must not contain globbing or regex special
# characters, hashes, percent signs, but SUFFIX may contain a leading
# dot (in which case that matches only a dot).
# func_strip_suffix prefix name
# Result in $func_stripname_result.  The case distinguishes suffixes that
# begin with a dot (which must be backslash-escaped for sed) from others.
# NOTE(review): this case has no subject; upstream matches ${2} here —
# presumably another translator artifact, verify against the converter.
proc func_stripname {
    case{
      .* { setvar func_stripname_result = $($ECHO ${3} | $SED "s%^${1}%%; s%\\\\${2}\$%%)}
      * {  setvar func_stripname_result = $($ECHO ${3} | $SED "s%^${1}%%; s%${2}\$%%)}
    }
} # func_stripname may be replaced by extended shell implementation


# These SED scripts presuppose an absolute path with a trailing slash.
setvar pathcar = ''s,^/\([^/]*\).*$,\1,''       # first path component
setvar pathcdr = ''s,^/[^/]*,,''                # path minus first component
# Repeatedly delete "/./" segments, then a trailing "/.".
setvar removedotparts = '':dotsl
		s@/\./@/@g
		t dotsl
		s,/\.$,/,''
setvar collapseslashes = ''s@/\{1,\}@/@g''      # "//..." -> "/"
setvar finalslash = ''s,/*$,/,''                # ensure exactly one trailing slash

# func_normal_abspath PATH
# Remove doubled-up and trailing slashes, "." path components,
# and cancel out any ".." path components in PATH after making
# it an absolute path.
#             value returned in "$func_normal_abspath_result"
proc func_normal_abspath {
  # Start from root dir and reassemble the path.
  setvar func_normal_abspath_result = ''
  setvar func_normal_abspath_tpath = "$1"
  setvar func_normal_abspath_altnamespace = ''
  case (func_normal_abspath_tpath) {
    "" {
      # Empty path, that just means $cwd.
      func_stripname '' '/' $(pwd)
      setvar func_normal_abspath_result = "$func_stripname_result"
      return
    }
    # The next three entries are used to spot a run of precisely
    # two leading slashes without using negated character classes;
    # we take advantage of case's first-match behaviour.
    ///* {
      # Unusual form of absolute path, do nothing.
    }
    //* {
      # Not necessarily an ordinary path; POSIX reserves leading '//'
      # and for example Cygwin uses it to access remote file shares
      # over CIFS/SMB, so we conserve a leading double slash if found.
      setvar func_normal_abspath_altnamespace = '/'
    }
    /* {
      # Absolute path, do nothing.
    }
    * {
      # Relative path, prepend $cwd.
      setvar func_normal_abspath_tpath = "$(pwd)/$func_normal_abspath_tpath"
    }
  }
  # Cancel out all the simple stuff to save iterations.  We also want
  # the path to end with a slash for ease of parsing, so make sure
  # there is one (and only one) here.
  setvar func_normal_abspath_tpath = $($ECHO $func_normal_abspath_tpath | $SED \
        -e $removedotparts -e $collapseslashes -e $finalslash)
  # Consume one component per iteration; tpath shrinks toward "/".
  while : {
    # Processed it all yet?
    if test $func_normal_abspath_tpath = /  {
      # If we ascended to the root using ".." the result may be empty now.
      if test -z $func_normal_abspath_result  {
        setvar func_normal_abspath_result = '/'
      }
      break
    }
    setvar func_normal_abspath_tcomponent = $($ECHO $func_normal_abspath_tpath | $SED \
        -e $pathcar)
    setvar func_normal_abspath_tpath = $($ECHO $func_normal_abspath_tpath | $SED \
        -e $pathcdr)
    # Figure out what to do with it
    case (func_normal_abspath_tcomponent) {
      "" {
        # Trailing empty path component, ignore it.
      }
      .. {
        # Parent dir; strip last assembled component from result.
        func_dirname $func_normal_abspath_result
        setvar func_normal_abspath_result = "$func_dirname_result"
      }
      * {
        # Actual path component, append it.
        setvar func_normal_abspath_result = "$func_normal_abspath_result/$func_normal_abspath_tcomponent"
      }
    }
  }
  # Restore leading double-slash if one was found on entry.
  setvar func_normal_abspath_result = "$func_normal_abspath_altnamespace$func_normal_abspath_result"
}

# func_relative_path SRCDIR DSTDIR
# generates a relative path from SRCDIR to DSTDIR, with a trailing
# slash if non-empty, suitable for immediately appending a filename
# without needing to append a separator.
#             value returned in "$func_relative_path_result"
proc func_relative_path {
    setvar func_relative_path_result = ''
  # Canonicalize both endpoints first.
  func_normal_abspath $1
  setvar func_relative_path_tlibdir = "$func_normal_abspath_result"
  func_normal_abspath $2
  setvar func_relative_path_tbindir = "$func_normal_abspath_result"

  # Ascend the tree starting from libdir
  while : {
    # check if we have found a prefix of bindir
    case (func_relative_path_tbindir) {
      $func_relative_path_tlibdir {
        # found an exact match
        setvar func_relative_path_tcancelled = ''
        break
        }
      $func_relative_path_tlibdir* {
        # found a matching prefix
        func_stripname $func_relative_path_tlibdir '' $func_relative_path_tbindir
        setvar func_relative_path_tcancelled = "$func_stripname_result"
        if test -z $func_relative_path_result {
          setvar func_relative_path_result = '.'
        }
        break
        }
      * {
        # No prefix match yet: go up one level, adding "../" per step.
        func_dirname $func_relative_path_tlibdir
        setvar func_relative_path_tlibdir = ${func_dirname_result}
        if test "x$func_relative_path_tlibdir" = x  {
          # Have to descend all the way to the root!
          setvar func_relative_path_result = "../$func_relative_path_result"
          setvar func_relative_path_tcancelled = "$func_relative_path_tbindir"
          break
        }
        setvar func_relative_path_result = "../$func_relative_path_result"
        }
    }
  }

  # Now calculate path; take care to avoid doubling-up slashes.
  func_stripname '' '/' $func_relative_path_result
  setvar func_relative_path_result = "$func_stripname_result"
  func_stripname '/' '/' $func_relative_path_tcancelled
  if test "x$func_stripname_result" != x  {
    setvar func_relative_path_result = "${func_relative_path_result}/${func_stripname_result}"
  }

  # Normalisation. If bindir is libdir, return empty string,
  # else relative path ending with a slash; either way, target
  # file name can be directly appended.
  if test ! -z $func_relative_path_result {
    func_stripname './' '' "$func_relative_path_result/"
    setvar func_relative_path_result = "$func_stripname_result"
  }
}

# The name of this program:
func_dirname_and_basename $progpath
setvar progname = "$func_basename_result"

# Make sure we have an absolute path for reexecution:
case (progpath) {
  [\\/]*|[A-Za-z]:\\* { }
  *[\\/]* {
     # Relative path with a directory part: make it absolute via cd+pwd.
     setvar progdir = "$func_dirname_result"
     setvar progdir = $(cd $progdir && pwd)
     setvar progpath = ""$progdir/$progname""
     }
  * {
     # Bare name: search $PATH for an executable $progname.
     setvar save_IFS = "$IFS"
     setvar IFS = ${PATH_SEPARATOR-:}
     for progdir in $PATH {
       setvar IFS = "$save_IFS"
       test -x "$progdir/$progname" && break
     }
     setvar IFS = "$save_IFS"
     test -n $progdir || setvar progdir = $(pwd)
     setvar progpath = ""$progdir/$progname""
     }
}

# Sed substitution that helps us do robust quoting.  It backslashifies
# metacharacters that are still active within double-quoted strings.
setvar Xsed = ""${SED}"' -e 1s/^X//"'
setvar sed_quote_subst = ''s/\([`"$\\]\)/\\\1/g''

# Same as above, but do not quote variable references.
setvar double_quote_subst = ''s/\(["`\\]\)/\\\1/g''

# Sed substitution that turns a string into a regex matching for the
# string literally.
setvar sed_make_literal_regex = ''s,[].[^$\\*\/],\\&,g''

# Sed substitution that converts a w32 file name or path
# which contains forward slashes, into one that contains
# (escaped) backslashes.  A very naive implementation.
setvar lt_sed_naive_backslashify = ''s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g''

# Re-`\' parameter expansions in output of double_quote_subst that were
# `\'-ed in input to the same.  If an odd number of `\' preceded a '$'
# in input to double_quote_subst, that '$' was protected from expansion.
# Since each input `\' is now two `\'s, look for any number of runs of
# four `\'s followed by two `\'s and then a '$'.  `\' that '$'.
setvar bs = ''\\''
setvar bs2 = ''\\\\''
setvar bs4 = ''\\\\\\\\''
setvar dollar = ''\$''
setvar sed_double_backslash = ""\
  s/$bs4/&\\
/g
  s/^$bs2$dollar/$bs&/
  s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g
  s/\n//g""

# Standard options:
setvar opt_dry_run = 'false'
setvar opt_help = 'false'
setvar opt_quiet = 'false'
setvar opt_verbose = 'false'
setvar opt_warning = ':'

# func_echo arg...
# Echo program name prefixed message, along with the current mode
# name if it has been set yet.
# func_echo arg...
# Echo program name prefixed message, along with the current mode
# name if it has been set yet.  join(ARGV) space-joins all arguments.
proc func_echo {
    $ECHO "$progname: ${opt_mode+$opt_mode: }$[join(ARGV)]"
}

# func_verbose arg...
# Echo program name prefixed message in verbose mode only.
# func_verbose arg...
# Echo program name prefixed message in verbose mode only
# ($opt_verbose is ':' when verbose, 'false' otherwise).
proc func_verbose {
    $opt_verbose && func_echo ${1+"$@"}

    # A bug in bash halts the script if the last line of a function
    # fails when set -e is in force, so we need another command to
    # work around that:
    :
}

# func_echo_all arg...
# Invoke $ECHO with all args, space-separated.
# func_echo_all arg...
# Invoke $ECHO with all args, space-separated (no program-name prefix).
proc func_echo_all {
    $ECHO "$[join(ARGV)]"
}

# func_error arg...
# Echo program name prefixed message to standard error.
# func_error arg...
# Echo program name prefixed message to standard error.
proc func_error {
    $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2
}

# func_warning arg...
# Echo program name prefixed warning message to standard error.
# func_warning arg...
# Echo program name prefixed warning message to standard error,
# unless warnings are disabled ($opt_warning set to 'false').
proc func_warning {
    $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2

    # bash bug again:
    :
}

# func_fatal_error arg...
# Echo program name prefixed message to standard error, and exit.
# func_fatal_error arg...
# Echo program name prefixed message to standard error, and exit
# with $EXIT_FAILURE (1).
proc func_fatal_error {
    func_error ${1+"$@"}
    exit $EXIT_FAILURE
}

# func_fatal_help arg...
# Echo program name prefixed message to standard error, followed by
# a help hint, and exit.
# func_fatal_help arg...
# Echo program name prefixed message to standard error, followed by
# a help hint ($help, set below and refined per-mode later), and exit.
proc func_fatal_help {
    func_error ${1+"$@"}
    func_fatal_error $help
}
setvar help = ""Try \`$progname --help' for more information.""  ## default


# func_grep expression filename
# Check whether EXPRESSION matches any line of FILENAME, without output.
# func_grep expression filename
# Check whether EXPRESSION matches any line of FILENAME, without output.
# Returns grep's exit status (0 on a match).
proc func_grep {
    $GREP $1 $2 >/dev/null 2>&1
}


# func_mkdir_p directory-path
# Make sure the entire path to DIRECTORY-PATH is available.
# func_mkdir_p directory-path
# Make sure the entire path to DIRECTORY-PATH is available
# (portable mkdir -p; no-op in dry-run mode).
proc func_mkdir_p {
    setvar my_directory_path = "$1"
    setvar my_dir_list = ''

    if test -n $my_directory_path && test $opt_dry_run != ":" {

      # Protect directory names starting with `-'
      case (my_directory_path) {
        -* { setvar my_directory_path = ""./$my_directory_path"" }
      }

      # While some portion of DIR does not yet exist...
      while test ! -d $my_directory_path {
        # ...make a list in topmost first order.  Use a colon delimited
	# list incase some portion of path contains whitespace.
        setvar my_dir_list = ""$my_directory_path:$my_dir_list""

        # If the last portion added has no slash in it, the list is done
        case (my_directory_path) { */* { } * { break } }

        # ...otherwise throw away the child directory and loop
        setvar my_directory_path = $($ECHO $my_directory_path | $SED -e $dirname)
      }
      # Drop the trailing colon(s) left by the loop above.
      setvar my_dir_list = $($ECHO $my_dir_list | $SED 's,:*$,,)

      # Split the list on ':' only; restore IFS inside the loop body.
      setvar save_mkdir_p_IFS = "$IFS"; setvar IFS = '':''
      for my_dir in $my_dir_list {
	setvar IFS = "$save_mkdir_p_IFS"
        # mkdir can fail with a `File exist' error if two processes
        # try to create one of the directories concurrently.  Don't
        # stop in that case!
        $MKDIR $my_dir 2>/dev/null || :
      }
      setvar IFS = "$save_mkdir_p_IFS"

      # Bail out if we (or some other process) failed to create a directory.
      test -d $my_directory_path || \
        func_fatal_error "Failed to create \`$1'"
    }
}


# func_mktempdir [string]
# Make a temporary directory that won't clash with other running
# libtool processes, and avoids race conditions if possible.  If
# given, STRING is the basename for that directory.
# func_mktempdir [string]
# Make a temporary directory that won't clash with other running
# libtool processes, and avoids race conditions if possible.  If
# given, STRING is the basename for that directory.
# Prints the directory name on stdout.
proc func_mktempdir {
    setvar my_template = ""${TMPDIR-/tmp}/${1-$progname}""

    if test $opt_dry_run = ":" {
      # Return a directory name, but don't create it in dry-run mode
      setvar my_tmpdir = ""${my_template}-"$$"
    } else {

      # If mktemp works, use that first and foremost
      setvar my_tmpdir = $(mktemp -d "${my_template}-XXXXXXXX" )

      if test ! -d $my_tmpdir {
        # Failing that, at least try and use $RANDOM to avoid a race
        setvar my_tmpdir = ""${my_template}-${RANDOM-0}"$$"

        # Create it with owner-only permissions, then restore umask.
        setvar save_mktempdir_umask = $(umask)
        umask 0077
        $MKDIR $my_tmpdir
        umask $save_mktempdir_umask
      }

      # If we're not in dry-run mode, bomb out on failure
      test -d $my_tmpdir || \
        func_fatal_error "cannot create temporary directory \`$my_tmpdir'"
    }

    $ECHO $my_tmpdir
}


# func_quote_for_eval arg
# Aesthetically quote ARG to be evaled later.
# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT
# is double-quoted, suitable for a subsequent eval, whereas
# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters
# which are still active within double quotes backslashified.
# See header comment above for the two result variables.
# case (1) dispatches on $1: only backslashify when $1 actually contains
# one of \ ` " $, otherwise skip the sed round-trip.
proc func_quote_for_eval {
    case (1) {
      *[\\\`\"\$]* {
	setvar func_quote_for_eval_unquoted_result = $($ECHO $1 | $SED $sed_quote_subst) }
      * {
        setvar func_quote_for_eval_unquoted_result = "$1" }
    }

    case (func_quote_for_eval_unquoted_result) {
      # Double-quote args containing shell metacharacters to delay
      # word splitting, command substitution and and variable
      # expansion for a subsequent eval.
      # Many Bourne shells cannot handle close brackets correctly
      # in scan sets, so we specify it separately.
      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"" {
        setvar func_quote_for_eval_result = ""\"$func_quote_for_eval_unquoted_result"\""
        }
      * {
        # No metacharacters: the unquoted form is already eval-safe.
        setvar func_quote_for_eval_result = "$func_quote_for_eval_unquoted_result"
    }
}
}


# func_quote_for_expand arg
# Aesthetically quote ARG to be evaled later; same as above,
# but do not quote variable references.
# func_quote_for_expand arg
# Aesthetically quote ARG to be evaled later; same as above,
# but do not quote variable references ($... stays expandable).
# Result in $func_quote_for_expand_result.
proc func_quote_for_expand {
    case (1) {
      *[\\\`\"]* {
	setvar my_arg = $($ECHO $1 | $SED \
	    -e $double_quote_subst -e $sed_double_backslash) }
      * {
        setvar my_arg = "$1" }
    }

    case (my_arg) {
      # Double-quote args containing shell metacharacters to delay
      # word splitting and command substitution for a subsequent eval.
      # Many Bourne shells cannot handle close brackets correctly
      # in scan sets, so we specify it separately.
      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"" {
        setvar my_arg = ""\"$my_arg"\""
        }
    }

    setvar func_quote_for_expand_result = "$my_arg"
}


# func_show_eval cmd [fail_exp]
# Unless opt_silent is true, then output CMD.  Then, if opt_dryrun is
# not true, evaluate CMD.  If the evaluation of CMD fails, and FAIL_EXP
# is given, then evaluate it.
# func_show_eval cmd [fail_exp]
# Unless opt_silent is true, then output CMD.  Then, if opt_dryrun is
# not true, evaluate CMD.  If the evaluation of CMD fails, and FAIL_EXP
# is given, then evaluate it (with $? staged via `(exit $my_status)`).
proc func_show_eval {
    setvar my_cmd = "$1"
    setvar my_fail_exp = "${2-:}"

    ${opt_silent-false} || do {
      func_quote_for_expand $my_cmd
      eval "func_echo $func_quote_for_expand_result"
    }

    if ${opt_dry_run-false} { :; } else {
      eval $my_cmd
      # NOTE(review): `""$?` quoting differs from the `"$VAR"` style used
      # elsewhere in this translation — looks like a converter artifact;
      # confirm it still assigns the exit status verbatim.
      setvar my_status = ""$?
      if test $my_status -eq 0 { :; } else {
	eval "(exit $my_status); $my_fail_exp"
      }
    }
}


# func_show_eval_locale cmd [fail_exp]
# Unless opt_silent is true, then output CMD.  Then, if opt_dryrun is
# not true, evaluate CMD.  If the evaluation of CMD fails, and FAIL_EXP
# is given, then evaluate it.  Use the saved locale for evaluation.
# func_show_eval_locale cmd [fail_exp]
# Same as func_show_eval, but CMD runs under the user's saved locale
# ($lt_user_locale), restoring the safe C locale ($lt_safe_locale) after.
proc func_show_eval_locale {
    setvar my_cmd = "$1"
    setvar my_fail_exp = "${2-:}"

    ${opt_silent-false} || do {
      func_quote_for_expand $my_cmd
      eval "func_echo $func_quote_for_expand_result"
    }

    if ${opt_dry_run-false} { :; } else {
      eval "$lt_user_locale
	    $my_cmd"
      setvar my_status = ""$?
      eval $lt_safe_locale
      if test $my_status -eq 0 { :; } else {
	eval "(exit $my_status); $my_fail_exp"
      }
    }
}

# func_tr_sh
# Turn $1 into a string suitable for a shell variable name.
# Result is stored in $func_tr_sh_result.  All characters
# not in the set a-zA-Z0-9_ are replaced with '_'. Further,
# if $1 begins with a digit, a '_' is prepended as well.
# See header comment above: sanitize $1 into a valid shell identifier;
# result in $func_tr_sh_result.  Fast path when $1 is already valid.
proc func_tr_sh {
  case (1) {
  [0-9]* | *[!a-zA-Z0-9_]* {
    setvar func_tr_sh_result = $($ECHO $1 | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g)
    }
  *  {
    # Already a valid identifier: pass through unchanged.
    setvar func_tr_sh_result = "$1"
    }
  }
}


# func_version
# Echo version message to standard output and exit.
# func_version
# Echo version message to standard output and exit.
# The version text is extracted from this script's own comment header
# via sed over $progpath, so the `#` header lines are runtime data.
proc func_version {
    $opt_debug

    $SED -n '/(C)/!b go
	:more
	/\./!{
	  N
	  s/\n# / /
	  b more
	}
	:go
	/^# '$PROGRAM' (GNU /,/# warranty; / {
        s/^# //
	s/^# *$//
        s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/
        p
     }' < "$progpath"
     exit $?
}

# func_usage
# Echo short help message to standard output and exit.
# func_usage
# Echo short help message to standard output and exit.
# Extracts the `# Usage:` comment block from this script itself.
proc func_usage {
    $opt_debug

    $SED -n '/^# Usage:/,/^#  *.*--help/ {
        s/^# //
	s/^# *$//
	s/\$progname/'$progname'/
	p
    }' < "$progpath"
    echo
    $ECHO "run \`$progname --help | more' for full usage"
    exit $?
}

# func_help [NOEXIT]
# Echo long help message to standard output and exit,
# unless 'noexit' is passed as argument.
# func_help [NOEXIT]
# Echo long help message to standard output and exit,
# unless 'noexit' is passed as argument.
# Substitutes live configuration values ($host, $LD, automake/autoconf
# versions, ...) into the comment-header help text extracted from $progpath.
proc func_help {
    $opt_debug

    $SED -n '/^# Usage:/,/# Report bugs to/ {
	:print
        s/^# //
	s/^# *$//
	s*\$progname*'$progname'*
	s*\$host*'"$host"'*
	s*\$SHELL*'"$SHELL"'*
	s*\$LTCC*'"$LTCC"'*
	s*\$LTCFLAGS*'"$LTCFLAGS"'*
	s*\$LD*'"$LD"'*
	s/\$with_gnu_ld/'"$with_gnu_ld"'/
	s/\$automake_version/'"$(shell {${AUTOMAKE-automake} --version} 2>/dev/null |$SED 1q)"'/
	s/\$autoconf_version/'"$(shell {${AUTOCONF-autoconf} --version} 2>/dev/null |$SED 1q)"'/
	p
	d
     }
     /^# .* home page:/b print
     /^# General help using/b print
     ' < "$progpath"
    setvar ret = ""$?
    if test -z $1 {
      exit $ret
    }
}

# func_missing_arg argname
# Echo program name prefixed message to standard error and set global
# exit_cmd.
# func_missing_arg argname
# Echo program name prefixed message to standard error and set global
# exit_cmd (checked after option parsing to bail out).
proc func_missing_arg {
    $opt_debug

    func_error "missing argument for $1."
    setvar exit_cmd = 'exit'
}


# func_split_short_opt shortopt
# Set func_split_short_opt_name and func_split_short_opt_arg shell
# variables after splitting SHORTOPT after the 2nd character.
# func_split_short_opt shortopt
# Set func_split_short_opt_name and func_split_short_opt_arg shell
# variables after splitting SHORTOPT after the 2nd character
# (e.g. "-dfoo" -> name "-d", arg "foo").
proc func_split_short_opt {
    setvar my_sed_short_opt = ''1s/^\(..\).*$/\1/;q''
    setvar my_sed_short_rest = ''1s/^..\(.*\)$/\1/;q''

    setvar func_split_short_opt_name = $($ECHO $1 | $SED $my_sed_short_opt)
    setvar func_split_short_opt_arg = $($ECHO $1 | $SED $my_sed_short_rest)
} # func_split_short_opt may be replaced by extended shell implementation


# func_split_long_opt longopt
# Set func_split_long_opt_name and func_split_long_opt_arg shell
# variables after splitting LONGOPT at the `=' sign.
# func_split_long_opt longopt
# Set func_split_long_opt_name and func_split_long_opt_arg shell
# variables after splitting LONGOPT at the `=' sign
# (e.g. "--mode=link" -> name "--mode", arg "link").
proc func_split_long_opt {
    setvar my_sed_long_opt = ''1s/^\(--[^=]*\)=.*/\1/;q''
    setvar my_sed_long_arg = ''1s/^--[^=]*=//''

    setvar func_split_long_opt_name = $($ECHO $1 | $SED $my_sed_long_opt)
    setvar func_split_long_opt_arg = $($ECHO $1 | $SED $my_sed_long_arg)
} # func_split_long_opt may be replaced by extended shell implementation

# exit_cmd is ':' normally; option errors set it to 'exit' (see
# func_missing_arg) and it is invoked after parsing.
setvar exit_cmd = ':'




# Magic marker strings used to recognize libtool-generated wrappers.
setvar magic = ""%%%MAGIC variable%%%""
setvar magic_exe = ""%%%MAGIC EXE variable%%%""

# Global variables.
setvar nonopt = ''
setvar preserve_args = ''
# sed scripts converting between .lo and object-file extensions.
setvar lo2o = ""s/\\.lo\$/.${objext}/""
setvar o2lo = ""s/\\.${objext}\$/.lo/""
setvar extracted_archives = ''
setvar extracted_serial = '0'

# If this variable is set in any of the actions, the command in it
# will be execed at the end.  This prevents here-documents from being
# left over by shells.
setvar exec_cmd = ''

# func_append var value
# Append VALUE to the end of shell variable VAR.
# func_append var value
# Append VALUE to the end of shell variable VAR (VAR is passed by name;
# the eval builds `VAR=$VAR$VALUE`).
proc func_append {
    eval "${1}=\$${1}\${2}"
} # func_append may be replaced by extended shell implementation

# func_append_quoted var value
# Quote VALUE and append to the end of shell variable VAR, separated
# by a space.
# func_append_quoted var value
# Quote VALUE (via func_quote_for_eval) and append to the end of shell
# variable VAR, separated by a space.
proc func_append_quoted {
    func_quote_for_eval ${2}
    eval "${1}=\$${1}\\ \$func_quote_for_eval_result"
} # func_append_quoted may be replaced by extended shell implementation


# func_arith arithmetic-term...
# func_arith arithmetic-term...
# Evaluate the terms with expr(1); result in $func_arith_result.
proc func_arith {
    setvar func_arith_result = $(expr ${@})
} # func_arith may be replaced by extended shell implementation


# func_len string
# STRING may not start with a hyphen.
# func_len string
# STRING may not start with a hyphen (expr would take it as an option).
# Result is the length of STRING, or $max_cmd_len if expr fails.
proc func_len {
    setvar func_len_result = $(expr ${1} : ".*" 2>/dev/null || echo $max_cmd_len)
} # func_len may be replaced by extended shell implementation


# func_lo2o object
# func_lo2o object
# Map a .lo name to the corresponding .$objext name via the $lo2o sed script.
proc func_lo2o {
    setvar func_lo2o_result = $($ECHO ${1} | $SED $lo2o)
} # func_lo2o may be replaced by extended shell implementation


# func_xform libobj-or-source
# func_xform libobj-or-source
# Replace the final extension of the argument with .lo.
proc func_xform {
    setvar func_xform_result = $($ECHO ${1} | $SED 's/\.[^.]*$/.lo/)
} # func_xform may be replaced by extended shell implementation


# func_fatal_configuration arg...
# Echo program name prefixed message to standard error, followed by
# a configuration failure hint, and exit.
# func_fatal_configuration arg...
# Echo program name prefixed message to standard error, followed by
# a configuration failure hint, and exit with $EXIT_FAILURE.
proc func_fatal_configuration {
    func_error ${1+"$@"}
    func_error "See the $PACKAGE documentation for more information."
    func_fatal_error "Fatal configuration error."
}


# func_config
# Display the configuration for all the tags in this script.
# func_config
# Display the configuration for all the tags in this script.
# Configuration blocks live between `# ### BEGIN/END LIBTOOL ...` marker
# comments inside $progpath itself.
proc func_config {
    setvar re_begincf = ''^# ### BEGIN LIBTOOL''
    setvar re_endcf = ''^# ### END LIBTOOL''

    # Default configuration.
    $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath"

    # Now print the configurations for the tags.
    for tagname in $taglist {
      $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath"
    }

    exit $?
}

# func_features
# Display the features supported by this script.
# func_features
# Display the features supported by this script (shared/static library
# support as configured) and exit.
proc func_features {
    echo "host: $host"
    if test $build_libtool_libs = yes {
      echo "enable shared libraries"
    } else {
      echo "disable shared libraries"
    }
    if test $build_old_libs = yes {
      echo "enable static libraries"
    } else {
      echo "disable static libraries"
    }

    exit $?
}

# func_enable_tag tagname
# Verify that TAGNAME is valid, and either flag an error and exit, or
# enable the TAGNAME tag.  We also add TAGNAME to the global $taglist
# variable here.
# func_enable_tag tagname
# Verify that TAGNAME is valid, and either flag an error and exit, or
# enable the TAGNAME tag.  We also add TAGNAME to the global $taglist
# variable here.
proc func_enable_tag {
  # Global variable:
  setvar tagname = "$1"

  setvar re_begincf = ""^# ### BEGIN LIBTOOL TAG CONFIG: $tagname"\$"
  setvar re_endcf = ""^# ### END LIBTOOL TAG CONFIG: $tagname"\$"
  setvar sed_extractcf = ""/$re_begincf/,/$re_endcf/p""

  # Validate tagname: only letters, digits and -_,/ are allowed.
  case (tagname) {
    *[!-_A-Za-z0-9,/]* {
      func_fatal_error "invalid tag name: $tagname"
      }
  }

  # Don't test for the "default" C tag, as we know it's
  # there but not specially marked.
  case (tagname) {
    CC { }
    * {
      if $GREP $re_begincf $progpath >/dev/null 2>&1 {
	setvar taglist = ""$taglist $tagname""

	# Evaluate the configuration.  Be careful to quote the path
	# and the sed script, to avoid splitting on whitespace, but
	# also don't use non-portable quotes within backquotes within
	# quotes we have to do it in 2 steps:
	# BUGFIX: extract the tag config from this script itself; the
	# `< "$progpath"` redirect was dropped in translation, leaving
	# sed to read inherited stdin (cf. upstream libtool and the
	# identical redirect in func_config above).
	setvar extractedcf = $($SED -n -e $sed_extractcf < "$progpath")
	eval $extractedcf
      } else {
	func_error "ignoring unknown tag $tagname"
      }
      }
  }
}

# func_check_version_match
# Ensure that we are using m4 macros, and libtool script from the same
# release of libtool.
# func_check_version_match
# Ensure that we are using m4 macros, and libtool script from the same
# release of libtool.  Exits with $EXIT_MISMATCH (63) on any mismatch;
# the message depends on which of revision/version/macro_version differ.
proc func_check_version_match {
  if test $package_revision != $macro_revision {
    if test $VERSION != $macro_version {
      if test -z $macro_version {
        cat >&2 <<< """
$progname: Version mismatch error.  This is $PACKAGE $VERSION, but the
$progname: definition of this LT_INIT comes from an older release.
$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
$progname: and run autoconf again.
"""
      } else {
        cat >&2 <<< """
$progname: Version mismatch error.  This is $PACKAGE $VERSION, but the
$progname: definition of this LT_INIT comes from $PACKAGE $macro_version.
$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
$progname: and run autoconf again.
"""
      }
    } else {
      cat >&2 <<< """
$progname: Version mismatch error.  This is $PACKAGE $VERSION, revision $package_revision,
$progname: but the definition of this LT_INIT comes from revision $macro_revision.
$progname: You should recreate aclocal.m4 with macros from revision $package_revision
$progname: of $PACKAGE $VERSION and run autoconf again.
"""
    }

    exit $EXIT_MISMATCH
  }
}


# Shorthand for --mode=foo, only valid as the first argument
# (case (1) dispatches on $1; each arm also accepts unique abbreviations
# and rewrites argv to the canonical `--mode MODE` form).
case (1) {
clean|clea|cle|cl {
  shift; set dummy --mode clean ${1+"$@"}; shift
  }
compile|compil|compi|comp|com|co|c {
  shift; set dummy --mode compile ${1+"$@"}; shift
  }
execute|execut|execu|exec|exe|ex|e {
  shift; set dummy --mode execute ${1+"$@"}; shift
  }
finish|finis|fini|fin|fi|f {
  shift; set dummy --mode finish ${1+"$@"}; shift
  }
install|instal|insta|inst|ins|in|i {
  shift; set dummy --mode install ${1+"$@"}; shift
  }
link|lin|li|l {
  shift; set dummy --mode link ${1+"$@"}; shift
  }
uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u {
  shift; set dummy --mode uninstall ${1+"$@"}; shift
  }
}



# Option defaults:
# ':' means enabled/true, 'false' means disabled — these are run as
# commands in conditionals elsewhere (e.g. `$opt_help || ...`).
setvar opt_debug = ':'
setvar opt_dry_run = 'false'
setvar opt_config = 'false'
setvar opt_preserve_dup_deps = 'false'
setvar opt_features = 'false'
setvar opt_finish = 'false'
setvar opt_help = 'false'
setvar opt_help_all = 'false'
setvar opt_silent = ':'
setvar opt_warning = ':'
setvar opt_verbose = ':'
# Final defaults: silent and verbose both off.
setvar opt_silent = 'false'
setvar opt_verbose = 'false'


# Parse options once, thoroughly.  This comes as soon as possible in the
# script to make things like `--version' happen as quickly as we can.
# Option parsing: consume argv one option at a time; `break` out of the
# enclosing do-block on `--` or the first non-option argument.
do {
  # this just eases exit handling
  while test $Argc -gt 0 {
    setvar opt = "$1"
    shift
    case (opt) {
      --debug|-x {	setvar opt_debug = ''set -x''
			func_echo "enabling shell trace mode"
			$opt_debug
			}
      --dry-run|--dryrun|-n {
			setvar opt_dry_run = ':'
			}
      --config {
			setvar opt_config = ':'
func_config
			}
      --dlopen|-dlopen {
			# Accumulate -dlopen arguments, newline-separated.
			setvar optarg = "$1"
			setvar opt_dlopen = ""${opt_dlopen+$opt_dlopen
}$optarg""
			shift
			}
      --preserve-dup-deps {
			setvar opt_preserve_dup_deps = ':'
			}
      --features {
			setvar opt_features = ':'
func_features
			}
      --finish {
			setvar opt_finish = ':'
set dummy --mode finish ${1+"$@"}; shift
			}
      --help {
			setvar opt_help = ':'
			}
      --help-all {
			setvar opt_help_all = ':'
setvar opt_help = '': help-all''
			}
      --mode {
			test $Argc = 0 && func_missing_arg $opt && break
			setvar optarg = "$1"
			setvar opt_mode = "$optarg"
case (optarg) {
  # Valid mode arguments:
  clean|compile|execute|finish|install|link|relink|uninstall { }

  # Catch anything else as an error
  * { func_error "invalid argument for $opt"
     setvar exit_cmd = 'exit'
     break
     }
}
			shift
			}
      --no-silent|--no-quiet {
			setvar opt_silent = 'false'
func_append preserve_args " $opt"
			}
      --no-warning|--no-warn {
			setvar opt_warning = 'false'
func_append preserve_args " $opt"
			}
      --no-verbose {
			setvar opt_verbose = 'false'
func_append preserve_args " $opt"
			}
      --silent|--quiet {
			setvar opt_silent = ':'
func_append preserve_args " $opt"
        setvar opt_verbose = 'false'
			}
      --verbose|-v {
			setvar opt_verbose = ':'
func_append preserve_args " $opt"
setvar opt_silent = 'false'
			}
      --tag {
			test $Argc = 0 && func_missing_arg $opt && break
			setvar optarg = "$1"
			setvar opt_tag = "$optarg"
func_append preserve_args " $opt $optarg"
func_enable_tag $optarg
			shift
			}

      # NOTE: the --help arm below is unreachable (matched earlier above);
      # same as upstream libtool, kept for fidelity.
      -\?|-h {		func_usage				}
      --help {		func_help				}
      --version {	func_version				}

      # Separate optargs to long options:
      --*=* {
			func_split_long_opt $opt
			set dummy $func_split_long_opt_name $func_split_long_opt_arg ${1+"$@"}
			shift
			}

      # Separate non-argument short options:
      -\?*|-h*|-n*|-v* {
			func_split_short_opt $opt
			set dummy $func_split_short_opt_name "-$func_split_short_opt_arg" ${1+"$@"}
			shift
			}

      -- {		break					}
      -* {		func_fatal_help "unrecognized option \`$opt'" }
      * {		set dummy $opt ${1+"$@"};	shift; break  }
    }
  }

  # Validate options:

  # save first non-option argument
  if test "$Argc" -gt 0 {
    setvar nonopt = "$opt"
    shift
  }

  # preserve --debug
  test $opt_debug = : || func_append preserve_args " --debug"

  case (host) {
    *cygwin* | *mingw* | *pw32* | *cegcc* {
      # don't eliminate duplications in $postdeps and $predeps
      setvar opt_duplicate_compiler_generated_deps = ':'
      }
    * {
      setvar opt_duplicate_compiler_generated_deps = "$opt_preserve_dup_deps"
      }
  }

  $opt_help || do {
    # Sanity checks first:
    func_check_version_match

    if test $build_libtool_libs != yes && test $build_old_libs != yes {
      func_fatal_configuration "not configured to build any kind of library"
    }

    # Darwin sucks
    eval std_shrext='"'$shrext_cmds'"'

    # Only execute mode is allowed to have -dlopen flags.
    if test -n $opt_dlopen && test $opt_mode != execute {
      func_error "unrecognized option \`-dlopen'"
      $ECHO $help 1>&2
      exit $EXIT_FAILURE
    }

    # Change the help message to a mode-specific one.
    setvar generic_help = "$help"
    setvar help = ""Try \`$progname --help --mode=$opt_mode' for more information.""
  }


  # Bail if the options were screwed
  # ($exit_cmd is ':' normally, 'exit' after an option error).
  $exit_cmd $EXIT_FAILURE
}




## ----------- ##
##    Main.    ##
## ----------- ##

# func_lalib_p file
# True iff FILE is a libtool `.la' library or `.lo' object file.
# This function is only a basic sanity check; it will hardly flush out
# determined imposters.
proc func_lalib_p {
    # Succeed iff $1 exists and one of its first four lines carries the
    # "# Generated by ... $PACKAGE" marker that libtool writes into
    # .la / .lo files.  `$SED -e 4q` keeps the scan cheap for large files.
    test -f $1 &&
      $SED -e 4q $1 2>/dev/null \
        | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1
}

# func_lalib_unsafe_p file
# True iff FILE is a libtool `.la' library or `.lo' object file.
# This function implements the same check as func_lalib_p without
# resorting to external programs.  To this end, it redirects stdin and
# closes it afterwards, without saving the original file descriptor.
# As a safety measure, use it only where a negative result would be
# fatal anyway.  Works if `file' does not exist.
proc func_lalib_unsafe_p {
    # Same check as func_lalib_p but without spawning sed/grep: stdin is
    # temporarily redirected to $1 (fd 5 saves the original stdin) and the
    # first four lines are read looking for the generated-by marker.
    setvar lalib_p = 'no'
    if test -f $1 && test -r $1 && exec 5<&0 <"$1" {
	for lalib_p_l in 1 2 3 4
	{
	    read lalib_p_line
	    case (lalib_p_line) {
		\#\ Generated\ by\ *$PACKAGE*  { setvar lalib_p = 'yes'; break}
	    }
	}
	# Restore stdin from fd 5, then close fd 5.
	exec 0<&5 5<&-
    }
    # Exit status of the proc is the result of this final test.
    test $lalib_p = yes
}

# func_ltwrapper_script_p file
# True iff FILE is a libtool wrapper script
# This function is only a basic sanity check; it will hardly flush out
# determined imposters.
proc func_ltwrapper_script_p {
    # A wrapper script carries the same generated-by marker as a .la file,
    # so the .la heuristic is reused verbatim.
    func_lalib_p $1
}

# func_ltwrapper_executable_p file
# True iff FILE is a libtool wrapper executable
# This function is only a basic sanity check; it will hardly flush out
# determined imposters.
proc func_ltwrapper_executable_p {
    # Append .exe unless $1 already ends in it, then grep the binary for the
    # $magic_exe marker embedded in libtool wrapper executables.
    setvar func_ltwrapper_exec_suffix = ''
    case (1) {
    *.exe { }
    * { setvar func_ltwrapper_exec_suffix = '.exe' }
    }
    $GREP $magic_exe "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1
}

# func_ltwrapper_scriptname file
# Assumes file is an ltwrapper_executable
# uses $file to determine the appropriate filename for a
# temporary ltwrapper_script.
proc func_ltwrapper_scriptname {
    # Build DIR/$objdir/BASE_ltshwrapper from $1, stripping any .exe suffix.
    # Relies on func_dirname_and_basename / func_stripname result variables.
    func_dirname_and_basename $1 "" "."
    func_stripname '' '.exe' $func_basename_result
    setvar func_ltwrapper_scriptname_result = ""$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper""
}

# func_ltwrapper_p file
# True iff FILE is a libtool wrapper script or wrapper executable
# This function is only a basic sanity check; it will hardly flush out
# determined imposters.
proc func_ltwrapper_p {
    # True when $1 is either kind of libtool wrapper (script or executable).
    func_ltwrapper_script_p $1 || func_ltwrapper_executable_p $1
}


# func_execute_cmds commands fail_cmd
# Execute tilde-delimited COMMANDS.
# If FAIL_CMD is given, eval that upon failure.
# FAIL_CMD may read-access the current command in variable CMD!
proc func_execute_cmds {
    $opt_debug
    # Split $1 on '~' (the libtool command delimiter); IFS is restored at the
    # top of each iteration so the eval/func_show_eval see normal splitting.
    setvar save_ifs = "$IFS"; setvar IFS = ''~''
    for cmd in $1 {
      setvar IFS = "$save_ifs"
      # Expand variables embedded in the command text before running it.
      eval cmd='"'$cmd'"'
      func_show_eval $cmd ${2-:}
    }
    setvar IFS = "$save_ifs"
}


# func_source file
# Source FILE, adding directory component if necessary.
# Note that it is not necessary on cygwin/mingw to append a dot to
# FILE even if both FILE and FILE.exe exist: automatic-append-.exe
# behavior happens only for exec(3), not for open(2)!  Also, sourcing
# `FILE.' does not work on cygwin managed mounts.
proc func_source {
    $opt_debug
    # Prefix bare file names with ./ so `source` does not search $PATH;
    # names already containing a path separator are used as-is.
    case (1) {
    */* | *\\* {	source "$1" }
    * {		source "./$1" }
    }
}


# func_resolve_sysroot PATH
# Replace a leading = in PATH with a sysroot.  Store the result into
# func_resolve_sysroot_result
proc func_resolve_sysroot {
  setvar func_resolve_sysroot_result = "$1"
  # A leading '=' is libtool's sysroot shorthand: strip it and substitute
  # $lt_sysroot.  Paths without '=' pass through unchanged.
  case (func_resolve_sysroot_result) {
  =* {
    func_stripname '=' '' $func_resolve_sysroot_result
    setvar func_resolve_sysroot_result = "$lt_sysroot$func_stripname_result"
    }
  }
}

# func_replace_sysroot PATH
# If PATH begins with the sysroot, replace it with = and
# store the result into func_replace_sysroot_result.
proc func_replace_sysroot {
  # Inverse of func_resolve_sysroot: if $1 starts with $lt_sysroot, replace
  # that prefix with '='.  The conversion to this dialect dropped the case
  # subject (the line read bare `case{`, a syntax error); upstream libtool
  # matches "$lt_sysroot:$1" so the first arm fires only when $lt_sysroot is
  # non-empty AND $1 begins with it.
  case ("$lt_sysroot:$1") {
  ?*:"$lt_sysroot"* {
    func_stripname $lt_sysroot '' $1
    setvar func_replace_sysroot_result = ""=$func_stripname_result""
    }
  * {
    # Including no sysroot.
    setvar func_replace_sysroot_result = "$1"
    }
  }
}

# func_infer_tag arg
# Infer tagged configuration to use if any are available and
# if one wasn't chosen via the "--tag" command line option.
# Only attempt this if the compiler in the base compile
# command doesn't match the default compiler.
# arg is usually of the form 'gcc ...'
proc func_infer_tag {
    $opt_debug
    # Only infer when tags exist and --tag was not given on the command line.
    if test -n $available_tags && test -z $tagname {
      setvar CC_quoted = ''
      for arg in $CC {
	func_append_quoted CC_quoted $arg
      }
      setvar CC_expanded = $(func_echo_all $CC)
      setvar CC_quoted_expanded = $(func_echo_all $CC_quoted)
      case (@) {
      # Blanks in the command may have been stripped by the calling shell,
      # but not from the CC environment variable when configure was run.
      " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
      " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "* { }
      # Blanks at the start of $base_compile will cause this to fail
      # if we don't check for them as well.
      * {
	for z in $available_tags {
	  if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null {
	    # Evaluate the configuration.
	    eval $(${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' )
	    setvar CC_quoted = ''
	    for arg in $CC {
	      # Double-quote args containing other shell metacharacters.
	      func_append_quoted CC_quoted $arg
	    }
	    setvar CC_expanded = $(func_echo_all $CC)
	    setvar CC_quoted_expanded = $(func_echo_all $CC_quoted)
	    # NOTE(review): conversion dropped this case subject (bare
	    # `case{`); upstream libtool matches "$@ " (args + trailing
	    # space) against the tagged CC spellings below.
	    case ("$@ ") {
	    " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
	    " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "* {
	      # The compiler in the base compile command matches
	      # the one in the tagged configuration.
	      # Assume this is the tagged configuration we want.
	      setvar tagname = "$z"
	      break
	      }
	    }
	  }
	}
	# If $tagname still isn't set, then no tagged configuration
	# was found and let the user know that the "--tag" command
	# line option must be used.
	if test -z $tagname {
	  func_echo "unable to infer tagged configuration"
	  func_fatal_error "specify a tag with \`--tag'"
#	else
#	  func_verbose "using $tagname tagged configuration"
	}
	}
      }
    }
}



# func_write_libtool_object output_name pic_name nonpic_name
# Create a libtool object file (analogous to a ".la" file),
# but don't create it if we're doing a dry run.
proc func_write_libtool_object {
    # $1 = output .lo name, $2 = PIC object name, $3 = non-PIC object name.
    setvar write_libobj = ${1}
    # Record 'none' for whichever object kind is not being built.
    if test $build_libtool_libs = yes {
      setvar write_lobj = "'''${2}'''"
    } else {
      setvar write_lobj = 'none'
    }

    if test $build_old_libs = yes {
      setvar write_oldobj = "'''${3}'''"
    } else {
      setvar write_oldobj = 'none'
    }

    # Write to a temp file first, then atomically move into place.
    $opt_dry_run || do {
      cat >${write_libobj}T <<< """
# $write_libobj - a libtool object file
# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
#
# Please DO NOT delete this file!
# It is necessary for linking the library.

# Name of the PIC object.
pic_object=$write_lobj

# Name of the non-PIC object
non_pic_object=$write_oldobj

"""
      $MV "${write_libobj}T" ${write_libobj}
    }
}


##################################################
# FILE NAME AND PATH CONVERSION HELPER FUNCTIONS #
##################################################

# func_convert_core_file_wine_to_w32 ARG
# Helper function used by file name conversion functions when $build is *nix,
# and $host is mingw, cygwin, or some other w32 environment. Relies on a
# correctly configured wine environment available, with the winepath program
# in $build's $PATH.
#
# ARG is the $build file name to be converted to w32 format.
# Result is available in $func_convert_core_file_wine_to_w32_result, and will
# be empty on error (or when ARG is empty)
proc func_convert_core_file_wine_to_w32 {
  $opt_debug
  # Default: pass $1 through; emptied below on conversion failure.
  setvar func_convert_core_file_wine_to_w32_result = "$1"
  if test -n $1 {
    # Unfortunately, winepath does not exit with a non-zero error code, so we
    # are forced to check the contents of stdout. On the other hand, if the
    # command is not found, the shell will set an exit code of 127 and print
    # *an error message* to stdout. So we must check for both error code of
    # zero AND non-empty stdout, which explains the odd construction:
    setvar func_convert_core_file_wine_to_w32_tmp = $(winepath -w $1 )
    if test "$?" -eq 0 && test -n ${func_convert_core_file_wine_to_w32_tmp} {
      # Normalize backslashes via $lt_sed_naive_backslashify.
      setvar func_convert_core_file_wine_to_w32_result = $($ECHO $func_convert_core_file_wine_to_w32_tmp |
        $SED -e $lt_sed_naive_backslashify)
    } else {
      setvar func_convert_core_file_wine_to_w32_result = ''
    }
  }
}
# end: func_convert_core_file_wine_to_w32


# func_convert_core_path_wine_to_w32 ARG
# Helper function used by path conversion functions when $build is *nix, and
# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly
# configured wine environment available, with the winepath program in $build's
# $PATH. Assumes ARG has no leading or trailing path separator characters.
#
# ARG is path to be converted from $build format to win32.
# Result is available in $func_convert_core_path_wine_to_w32_result.
# Unconvertible file (directory) names in ARG are skipped; if no directory names
# are convertible, then the result may be empty.
proc func_convert_core_path_wine_to_w32 {
  $opt_debug
  # unfortunately, winepath doesn't convert paths, only file names
  setvar func_convert_core_path_wine_to_w32_result = """"
  if test -n $1 {
    # Split $1 on ':' (IFS restored inside the loop for the helper call),
    # convert each component, and rejoin the successes with ';'.
    setvar oldIFS = "$IFS"
    setvar IFS = ':'
    for func_convert_core_path_wine_to_w32_f in $1 {
      setvar IFS = "$oldIFS"
      func_convert_core_file_wine_to_w32 $func_convert_core_path_wine_to_w32_f
      # Unconvertible components are silently dropped.
      if test -n $func_convert_core_file_wine_to_w32_result  {
        if test -z $func_convert_core_path_wine_to_w32_result {
          setvar func_convert_core_path_wine_to_w32_result = "$func_convert_core_file_wine_to_w32_result"
        } else {
          func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result"
        }
      }
    }
    setvar IFS = "$oldIFS"
  }
}
# end: func_convert_core_path_wine_to_w32


# func_cygpath ARGS...
# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when
# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2)
# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or
# (2), returns the Cygwin file name or path in func_cygpath_result (input
# file name or path is assumed to be in w32 format, as previously converted
# from $build's *nix or MSYS format). In case (3), returns the w32 file name
# or path in func_cygpath_result (input file name or path is assumed to be in
# Cygwin format). Returns an empty string on error.
#
# ARGS are passed to cygpath, with the last one being the file name or path to
# be converted.
#
# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH
# environment variable; do not put it in $PATH.
proc func_cygpath {
  $opt_debug
  # Run the cygpath named by $LT_CYGPATH with all proc arguments; result is
  # emptied on any failure so callers can test it.
  if test -n $LT_CYGPATH && test -f $LT_CYGPATH {
    setvar func_cygpath_result = $($LT_CYGPATH @ARGV )
    if test "$?" -ne 0 {
      # on failure, ensure result is empty
      setvar func_cygpath_result = ''
    }
  } else {
    setvar func_cygpath_result = ''
    func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'"
  }
}
#end: func_cygpath


# func_convert_core_msys_to_w32 ARG
# Convert file name or path ARG from MSYS format to w32 format.  Return
# result in func_convert_core_msys_to_w32_result.
proc func_convert_core_msys_to_w32 {
  $opt_debug
  # awkward: cmd appends spaces to result
  # Echo $1 through cmd.exe (which performs the MSYS->w32 mapping), strip
  # the trailing spaces, and naive-backslashify the separators.
  setvar func_convert_core_msys_to_w32_result = $(shell { cmd //c echo $1 } 2>/dev/null |
    $SED -e 's/[ ]*$//' -e $lt_sed_naive_backslashify)
}
#end: func_convert_core_msys_to_w32


# func_convert_file_check ARG1 ARG2
# Verify that ARG1 (a file name in $build format) was converted to $host
# format in ARG2. Otherwise, emit an error message, but continue (resetting
# func_to_host_file_result to ARG1).
proc func_convert_file_check {
  $opt_debug
  # Conversion failed when input ($1) was non-empty but output ($2) is empty:
  # warn and fall back to the unconverted name.
  if test -z $2 && test -n $1  {
    func_error "Could not determine host file name corresponding to"
    func_error "  \`$1'"
    func_error "Continuing, but uninstalled executables may not work."
    # Fallback:
    setvar func_to_host_file_result = "$1"
  }
}
# end func_convert_file_check


# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH
# Verify that FROM_PATH (a path in $build format) was converted to $host
# format in TO_PATH. Otherwise, emit an error message, but continue, resetting
# func_to_host_file_result to a simplistic fallback value (see below).
proc func_convert_path_check {
  $opt_debug
  # $1/$2 = from/to path separators, $3/$4 = from/to paths.  Empty $4 with
  # non-empty $3 means conversion failed: warn and fall back.
  if test -z $4 && test -n $3 {
    func_error "Could not determine the host path corresponding to"
    func_error "  \`$3'"
    func_error "Continuing, but uninstalled executables may not work."
    # Fallback.  This is a deliberately simplistic "conversion" and
    # should not be "improved".  See libtool.info.
    if test "x$1" != "x$2" {
      setvar lt_replace_pathsep_chars = ""s|$1|$2|g""
      setvar func_to_host_path_result = $(echo $3 |
        $SED -e $lt_replace_pathsep_chars)
    } else {
      setvar func_to_host_path_result = "$3"
    }
  }
}
# end func_convert_path_check


# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG
# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT
# and appending REPL if ORIG matches BACKPAT.
proc func_convert_path_front_back_pathsep {
  $opt_debug
  # If the original path ($4) matched FRONTPAT ($1), prepend REPL ($3).
  case (4) {
  $1  { setvar func_to_host_path_result = ""$3$func_to_host_path_result""
    }
  }
  # If it matched BACKPAT ($2), append REPL ($3).
  case (4) {
  $2  { func_append func_to_host_path_result $3
    }
  }
}
# end func_convert_path_front_back_pathsep


##################################################
# $build to $host FILE NAME CONVERSION FUNCTIONS #
##################################################
# invoked via `$to_host_file_cmd ARG'
#
# In each case, ARG is the path to be converted from $build to $host format.
# Result will be available in $func_to_host_file_result.


# func_to_host_file ARG
# Converts the file name ARG from $build format to $host format. Return result
# in func_to_host_file_result.
proc func_to_host_file {
  $opt_debug
  # Dispatch through the function "pointer" chosen at configure time.
  $to_host_file_cmd $1
}
# end func_to_host_file


# func_to_tool_file ARG LAZY
# converts the file name ARG from $build format to toolchain format. Return
# result in func_to_tool_file_result.  If the conversion in use is listed
# in (the comma separated) LAZY, no conversion takes place.
proc func_to_tool_file {
  $opt_debug
  # Convert $1 to toolchain format unless the active conversion is listed in
  # the comma-separated LAZY argument ($2).  The conversion to this dialect
  # dropped the case subject (the line read bare `case{`); upstream libtool
  # matches ",$2," so the membership test works with comma delimiters.
  case (",$2,") {
    *,"$to_tool_file_cmd",* {
      # Listed as lazy: pass the file name through untouched.
      setvar func_to_tool_file_result = "$1"
      }
    * {
      $to_tool_file_cmd $1
      setvar func_to_tool_file_result = "$func_to_host_file_result"
      }
  }
}
# end func_to_tool_file


# func_convert_file_noop ARG
# Copy ARG to func_to_host_file_result.
proc func_convert_file_noop {
  # Identity conversion: $build and $host share the same file name format.
  setvar func_to_host_file_result = "$1"
}
# end func_convert_file_noop


# func_convert_file_msys_to_w32 ARG
# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic
# conversion to w32 is not available inside the cwrapper.  Returns result in
# func_to_host_file_result.
proc func_convert_file_msys_to_w32 {
  $opt_debug
  setvar func_to_host_file_result = "$1"
  if test -n $1 {
    # Core MSYS->w32 conversion, then sanity-check the result.
    func_convert_core_msys_to_w32 $1
    setvar func_to_host_file_result = "$func_convert_core_msys_to_w32_result"
  }
  func_convert_file_check $1 $func_to_host_file_result
}
# end func_convert_file_msys_to_w32


# func_convert_file_cygwin_to_w32 ARG
# Convert file name ARG from Cygwin to w32 format.  Returns result in
# func_to_host_file_result.
proc func_convert_file_cygwin_to_w32 {
  $opt_debug
  setvar func_to_host_file_result = "$1"
  if test -n $1 {
    # because $build is cygwin, we call "the" cygpath in $PATH; no need to use
    # LT_CYGPATH in this case.
    # -m produces mixed-mode (forward-slash w32) output.
    setvar func_to_host_file_result = $(cygpath -m $1)
  }
  func_convert_file_check $1 $func_to_host_file_result
}
# end func_convert_file_cygwin_to_w32


# func_convert_file_nix_to_w32 ARG
# Convert file name ARG from *nix to w32 format.  Requires a wine environment
# and a working winepath. Returns result in func_to_host_file_result.
proc func_convert_file_nix_to_w32 {
  $opt_debug
  setvar func_to_host_file_result = "$1"
  if test -n $1 {
    # winepath-based conversion; result may be empty on failure, which
    # func_convert_file_check reports and repairs.
    func_convert_core_file_wine_to_w32 $1
    setvar func_to_host_file_result = "$func_convert_core_file_wine_to_w32_result"
  }
  func_convert_file_check $1 $func_to_host_file_result
}
# end func_convert_file_nix_to_w32


# func_convert_file_msys_to_cygwin ARG
# Convert file name ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
# Returns result in func_to_host_file_result.
proc func_convert_file_msys_to_cygwin {
  $opt_debug
  setvar func_to_host_file_result = "$1"
  if test -n $1 {
    # Two hops: MSYS -> w32, then w32 -> Cygwin via LT_CYGPATH (-u = unix).
    func_convert_core_msys_to_w32 $1
    func_cygpath -u $func_convert_core_msys_to_w32_result
    setvar func_to_host_file_result = "$func_cygpath_result"
  }
  func_convert_file_check $1 $func_to_host_file_result
}
# end func_convert_file_msys_to_cygwin


# func_convert_file_nix_to_cygwin ARG
# Convert file name ARG from *nix to Cygwin format.  Requires Cygwin installed
# in a wine environment, working winepath, and LT_CYGPATH set.  Returns result
# in func_to_host_file_result.
proc func_convert_file_nix_to_cygwin {
  $opt_debug
  setvar func_to_host_file_result = "$1"
  if test -n $1 {
    # convert from *nix to w32, then use cygpath to convert from w32 to cygwin.
    func_convert_core_file_wine_to_w32 $1
    func_cygpath -u $func_convert_core_file_wine_to_w32_result
    setvar func_to_host_file_result = "$func_cygpath_result"
  }
  func_convert_file_check $1 $func_to_host_file_result
}
# end func_convert_file_nix_to_cygwin


#############################################
# $build to $host PATH CONVERSION FUNCTIONS #
#############################################
# invoked via `$to_host_path_cmd ARG'
#
# In each case, ARG is the path to be converted from $build to $host format.
# The result will be available in $func_to_host_path_result.
#
# Path separators are also converted from $build format to $host format.  If
# ARG begins or ends with a path separator character, it is preserved (but
# converted to $host format) on output.
#
# All path conversion functions are named using the following convention:
#   file name conversion function    : func_convert_file_X_to_Y ()
#   path conversion function         : func_convert_path_X_to_Y ()
# where, for any given $build/$host combination the 'X_to_Y' value is the
# same.  If conversion functions are added for new $build/$host combinations,
# the two new functions must follow this pattern, or func_init_to_host_path_cmd
# will break.


# func_init_to_host_path_cmd
# Ensures that function "pointer" variable $to_host_path_cmd is set to the
# appropriate value, based on the value of $to_host_file_cmd.
# Lazily-initialized function "pointer"; set on first use below.
setvar to_host_path_cmd = ''
proc func_init_to_host_path_cmd {
  $opt_debug
  if test -z $to_host_path_cmd {
    # Derive func_convert_path_X_to_Y from func_convert_file_X_to_Y by
    # swapping the common prefix (relies on the naming convention above).
    func_stripname 'func_convert_file_' '' $to_host_file_cmd
    setvar to_host_path_cmd = ""func_convert_path_${func_stripname_result}""
  }
}


# func_to_host_path ARG
# Converts the path ARG from $build format to $host format. Return result
# in func_to_host_path_result.
proc func_to_host_path {
  $opt_debug
  # Ensure the path-conversion function pointer is set, then dispatch.
  func_init_to_host_path_cmd
  $to_host_path_cmd $1
}
# end func_to_host_path


# func_convert_path_noop ARG
# Copy ARG to func_to_host_path_result.
proc func_convert_path_noop {
  # Identity conversion: $build and $host share the same path format.
  setvar func_to_host_path_result = "$1"
}
# end func_convert_path_noop


# func_convert_path_msys_to_w32 ARG
# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic
# conversion to w32 is not available inside the cwrapper.  Returns result in
# func_to_host_path_result.
proc func_convert_path_msys_to_w32 {
  $opt_debug
  setvar func_to_host_path_result = "$1"
  if test -n $1 {
    # Remove leading and trailing path separator characters from ARG.  MSYS
    # behavior is inconsistent here; cygpath turns them into '.;' and ';.';
    # and winepath ignores them completely.
    func_stripname : : $1
    setvar func_to_host_path_tmp1 = "$func_stripname_result"
    func_convert_core_msys_to_w32 $func_to_host_path_tmp1
    setvar func_to_host_path_result = "$func_convert_core_msys_to_w32_result"
    # Verify, then restore any separator that was stripped (as ';' on w32).
    func_convert_path_check : ";" \
      $func_to_host_path_tmp1 $func_to_host_path_result
    func_convert_path_front_back_pathsep ":*" "*:" ";" $1
  }
}
# end func_convert_path_msys_to_w32


# func_convert_path_cygwin_to_w32 ARG
# Convert path ARG from Cygwin to w32 format.  Returns result in
# func_to_host_file_result.
proc func_convert_path_cygwin_to_w32 {
  $opt_debug
  setvar func_to_host_path_result = "$1"
  if test -n $1 {
    # See func_convert_path_msys_to_w32:
    func_stripname : : $1
    setvar func_to_host_path_tmp1 = "$func_stripname_result"
    # cygpath -p converts a whole path list; -m gives mixed-mode output.
    setvar func_to_host_path_result = $(cygpath -m -p $func_to_host_path_tmp1)
    func_convert_path_check : ";" \
      $func_to_host_path_tmp1 $func_to_host_path_result
    func_convert_path_front_back_pathsep ":*" "*:" ";" $1
  }
}
# end func_convert_path_cygwin_to_w32


# func_convert_path_nix_to_w32 ARG
# Convert path ARG from *nix to w32 format.  Requires a wine environment and
# a working winepath.  Returns result in func_to_host_file_result.
proc func_convert_path_nix_to_w32 {
  $opt_debug
  setvar func_to_host_path_result = "$1"
  if test -n $1 {
    # See func_convert_path_msys_to_w32:
    func_stripname : : $1
    setvar func_to_host_path_tmp1 = "$func_stripname_result"
    # winepath-based component-wise conversion (skips unconvertible parts).
    func_convert_core_path_wine_to_w32 $func_to_host_path_tmp1
    setvar func_to_host_path_result = "$func_convert_core_path_wine_to_w32_result"
    func_convert_path_check : ";" \
      $func_to_host_path_tmp1 $func_to_host_path_result
    func_convert_path_front_back_pathsep ":*" "*:" ";" $1
  }
}
# end func_convert_path_nix_to_w32


# func_convert_path_msys_to_cygwin ARG
# Convert path ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
# Returns result in func_to_host_file_result.
proc func_convert_path_msys_to_cygwin {
  $opt_debug
  setvar func_to_host_path_result = "$1"
  if test -n $1 {
    # See func_convert_path_msys_to_w32:
    func_stripname : : $1
    setvar func_to_host_path_tmp1 = "$func_stripname_result"
    # Two hops: MSYS -> w32, then w32 -> Cygwin path list via LT_CYGPATH.
    func_convert_core_msys_to_w32 $func_to_host_path_tmp1
    func_cygpath -u -p $func_convert_core_msys_to_w32_result
    setvar func_to_host_path_result = "$func_cygpath_result"
    # Cygwin uses ':' as its path separator, hence ':' on both sides here.
    func_convert_path_check : : \
      $func_to_host_path_tmp1 $func_to_host_path_result
    func_convert_path_front_back_pathsep ":*" "*:" : $1
  }
}
# end func_convert_path_msys_to_cygwin


# func_convert_path_nix_to_cygwin ARG
# Convert path ARG from *nix to Cygwin format.  Requires Cygwin installed in a
# a wine environment, working winepath, and LT_CYGPATH set.  Returns result in
# func_to_host_file_result.
proc func_convert_path_nix_to_cygwin {
  $opt_debug
  setvar func_to_host_path_result = "$1"
  if test -n $1 {
    # Remove leading and trailing path separator characters from
    # ARG. msys behavior is inconsistent here, cygpath turns them
    # into '.;' and ';.', and winepath ignores them completely.
    func_stripname : : $1
    setvar func_to_host_path_tmp1 = "$func_stripname_result"
    # Two hops: *nix -> w32 via winepath, then w32 -> Cygwin via LT_CYGPATH.
    func_convert_core_path_wine_to_w32 $func_to_host_path_tmp1
    func_cygpath -u -p $func_convert_core_path_wine_to_w32_result
    setvar func_to_host_path_result = "$func_cygpath_result"
    func_convert_path_check : : \
      $func_to_host_path_tmp1 $func_to_host_path_result
    func_convert_path_front_back_pathsep ":*" "*:" : $1
  }
}
# end func_convert_path_nix_to_cygwin


# func_mode_compile arg...
# Compile one source file into PIC and/or non-PIC objects and write the
# matching .lo libtool object file.  Arguments are the compile command
# followed by its options and the source file.
proc func_mode_compile {
    $opt_debug
    # Get the compilation command and the source file.
    setvar base_compile = ''
    setvar srcfile = "$nonopt"  #  always keep a non-empty value in "srcfile"
    setvar suppress_opt = 'yes'
    setvar suppress_output = ''
    setvar arg_mode = 'normal'
    setvar libobj = ''
    setvar later = ''
    # NOTE(review): the conversion glued this assignment and the following
    # `for` header onto a single unparseable line; split back into two
    # statements.  No other behavior changed.
    setvar pie_flag = ''
    for arg in @ARGV {
      case (arg_mode) {
      arg   {
	# do not "continue".  Instead, add this to base_compile
	setvar lastarg = "$arg"
	setvar arg_mode = 'normal'
	}

      target  {
	setvar libobj = "$arg"
	setvar arg_mode = 'normal'
	continue
	}

      normal  {
	# Accept any command-line options.
	case (arg) {
	-o {
	  test -n $libobj && \
	    func_fatal_error "you cannot specify \`-o' more than once"
	  setvar arg_mode = 'target'
	  continue
	  }

	-pie | -fpie | -fPIE {
          func_append pie_flag " $arg"
	  continue
	  }

	-shared | -static | -prefer-pic | -prefer-non-pic {
	  func_append later " $arg"
	  continue
	  }

	-no-suppress {
	  setvar suppress_opt = 'no'
	  continue
	  }

	-Xcompiler {
	  setvar arg_mode = 'arg'  #  the next one goes into the "base_compile" arg list
	  continue      #  The current "srcfile" will either be retained or
	  }            #  replaced later.  I would guess that would be a bug.

	-Wc,* {
	  func_stripname '-Wc,' '' $arg
	  setvar args = "$func_stripname_result"
	  setvar lastarg = ''
	  setvar save_ifs = "$IFS"; setvar IFS = '',''
	  for arg in $args {
	    setvar IFS = "$save_ifs"
	    func_append_quoted lastarg $arg
	  }
	  setvar IFS = "$save_ifs"
	  func_stripname ' ' '' $lastarg
	  setvar lastarg = "$func_stripname_result"

	  # Add the arguments to base_compile.
	  func_append base_compile " $lastarg"
	  continue
	  }

	* {
	  # Accept the current argument as the source file.
	  # The previous "srcfile" becomes the current argument.
	  #
	  setvar lastarg = "$srcfile"
	  setvar srcfile = "$arg"
	  }
	}  #  case $arg
	}
      }    #  case $arg_mode

      # Aesthetically quote the previous argument.
      func_append_quoted base_compile $lastarg
    } # for arg

    case (arg_mode) {
    arg {
      func_fatal_error "you must specify an argument for -Xcompile"
      }
    target {
      func_fatal_error "you must specify a target with \`-o'"
      }
    * {
      # Get the name of the library object.
      test -z $libobj && do {
	func_basename $srcfile
	setvar libobj = "$func_basename_result"
      }
      }
    }

    # Recognize several different file suffixes.
    # If the user specifies -o file.o, it is replaced with file.lo
    case (libobj) {
    *.[cCFSifmso] | \
    *.ada | *.adb | *.ads | *.asm | \
    *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \
    *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup {
      func_xform $libobj
      setvar libobj = "$func_xform_result"
      }
    }

    case (libobj) {
    *.lo { func_lo2o $libobj; setvar obj = "$func_lo2o_result" }
    * {
      func_fatal_error "cannot determine name of library object from \`$libobj'"
      }
    }

    func_infer_tag $base_compile

    # Apply options deferred from the first pass.
    for arg in $later {
      case (arg) {
      -shared {
	test $build_libtool_libs != yes && \
	  func_fatal_configuration "can not build a shared library"
	setvar build_old_libs = 'no'
	continue
	}

      -static {
	setvar build_libtool_libs = 'no'
	setvar build_old_libs = 'yes'
	continue
	}

      -prefer-pic {
	setvar pic_mode = 'yes'
	continue
	}

      -prefer-non-pic {
	setvar pic_mode = 'no'
	continue
	}
      }
    }

    func_quote_for_eval $libobj
    test "X$libobj" != "X$func_quote_for_eval_result" \
      && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"'	 &()|`$[]' \
      && func_warning "libobj name \`$libobj' may not contain shell special characters."
    func_dirname_and_basename $obj "/" ""
    setvar objname = "$func_basename_result"
    setvar xdir = "$func_dirname_result"
    setvar lobj = "${xdir}$objdir/$objname"

    test -z $base_compile && \
      func_fatal_help "you must specify a compilation command"

    # Delete any leftover library objects.
    if test $build_old_libs = yes {
      setvar removelist = ""$obj $lobj $libobj ${libobj}T""
    } else {
      setvar removelist = ""$lobj $libobj ${libobj}T""
    }

    # On Cygwin there's no "real" PIC flag so we must build both object types
    case (host_os) {
    cygwin* | mingw* | pw32* | os2* | cegcc* {
      setvar pic_mode = 'default'
      }
    }
    if test $pic_mode = no && test $deplibs_check_method != pass_all {
      # non-PIC code in shared libraries is not supported
      setvar pic_mode = 'default'
    }

    # Calculate the filename of the output object if compiler does
    # not support -o with -c
    if test $compiler_c_o = no {
      setvar output_obj = "$($ECHO $srcfile | $SED 's%^.*/%%; s%\.[^.]*$%%).${objext}"
      setvar lockfile = ""$output_obj.lock""
    } else {
      setvar output_obj = ''
      setvar need_locks = 'no'
      setvar lockfile = ''
    }

    # Lock this critical section if it is needed
    # We use this script file to make the link, it avoids creating a new file
    if test $need_locks = yes {
      while ! $opt_dry_run || ln $progpath $lockfile  {
	func_echo "Waiting for $lockfile to be removed"
	sleep 2
      }
    } elif test $need_locks = warn {
      if test -f $lockfile {
	$ECHO "\
*** ERROR, $lockfile exists and contains:
$(cat $lockfile )

This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together.  If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."

	$opt_dry_run || $RM $removelist
	exit $EXIT_FAILURE
      }
      func_append removelist " $output_obj"
      $ECHO $srcfile > "$lockfile"
    }

    $opt_dry_run || $RM $removelist
    func_append removelist " $lockfile"
    trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15

    func_to_tool_file $srcfile func_convert_file_msys_to_w32
    setvar srcfile = "$func_to_tool_file_result"
    func_quote_for_eval $srcfile
    setvar qsrcfile = "$func_quote_for_eval_result"

    # Only build a PIC object if we are building libtool libraries.
    if test $build_libtool_libs = yes {
      # Without this assignment, base_compile gets emptied.
      setvar fbsd_hideous_sh_bug = "$base_compile"

      if test $pic_mode != no {
	setvar command = ""$base_compile $qsrcfile $pic_flag""
      } else {
	# Don't build PIC code
	setvar command = ""$base_compile $qsrcfile""
      }

      func_mkdir_p "$xdir$objdir"

      if test -z $output_obj {
	# Place PIC objects in $objdir
	func_append command " -o $lobj"
      }

      func_show_eval_locale $command	\
          'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE'

      if test $need_locks = warn &&
	 test "X$(cat $lockfile )" != "X$srcfile" {
	$ECHO "\
*** ERROR, $lockfile contains:
$(cat $lockfile )

but it should contain:
$srcfile

This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together.  If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."

	$opt_dry_run || $RM $removelist
	exit $EXIT_FAILURE
      }

      # Just move the object if needed, then go on to compile the next one
      if test -n $output_obj && test "X$output_obj" != "X$lobj" {
	func_show_eval '$MV "$output_obj" "$lobj"' \
	  'error=$?; $opt_dry_run || $RM $removelist; exit $error'
      }

      # Allow error messages only from the first compilation.
      if test $suppress_opt = yes {
	setvar suppress_output = '' >/dev/null 2>&1''
      }
    }

    # Only build a position-dependent object if we build old libraries.
    if test $build_old_libs = yes {
      if test $pic_mode != yes {
	# Don't build PIC code
	setvar command = ""$base_compile $qsrcfile$pie_flag""
      } else {
	setvar command = ""$base_compile $qsrcfile $pic_flag""
      }
      if test $compiler_c_o = yes {
	func_append command " -o $obj"
      }

      # Suppress compiler output if we already did a PIC compilation.
      func_append command $suppress_output
      func_show_eval_locale $command \
        '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE'

      if test $need_locks = warn &&
	 test "X$(cat $lockfile )" != "X$srcfile" {
	$ECHO "\
*** ERROR, $lockfile contains:
$(cat $lockfile )

but it should contain:
$srcfile

This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together.  If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."

	$opt_dry_run || $RM $removelist
	exit $EXIT_FAILURE
      }

      # Just move the object if needed
      if test -n $output_obj && test "X$output_obj" != "X$obj" {
	func_show_eval '$MV "$output_obj" "$obj"' \
	  'error=$?; $opt_dry_run || $RM $removelist; exit $error'
      }
    }

    $opt_dry_run || do {
      func_write_libtool_object $libobj "$objdir/$objname" $objname

      # Unlock the critical section if it was locked
      if test $need_locks != no {
	setvar removelist = "$lockfile"
        $RM $lockfile
      }
    }

    exit $EXIT_SUCCESS
}

# Top-level dispatch: unless only help was requested, run compile mode when
# selected, forwarding remaining CLI args (${1+"$@"} preserves empty sets).
$opt_help || do {
  test $opt_mode = compile && func_mode_compile ${1+"$@"}
}

# func_mode_help
# Print the usage text for the mode named in $opt_mode.  An empty mode prints
# the generic help via func_help; an unknown mode is a fatal error.  The
# per-mode texts are emitted verbatim with $ECHO and must not be reflowed.
proc func_mode_help {
    # We need to display help for each of the modes.
    case (opt_mode) {
      "" {
        # Generic help is extracted from the usage comments
        # at the start of this file.
        func_help
        }

      clean {
        $ECHO \
"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE...

Remove files from the build directory.

RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm').  RM-OPTIONS are options (such as \`-f') to be passed
to RM.

If FILE is a libtool library, object or program, all the files associated
with it are deleted. Otherwise, only FILE itself is deleted using RM."
        }

      compile {
      $ECHO \
"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE

Compile a source file into a libtool library object.

This mode accepts the following additional options:

  -o OUTPUT-FILE    set the output file name to OUTPUT-FILE
  -no-suppress      do not suppress compiler output for multiple passes
  -prefer-pic       try to build PIC objects only
  -prefer-non-pic   try to build non-PIC objects only
  -shared           do not build a \`.o' file suitable for static linking
  -static           only build a \`.o' file suitable for static linking
  -Wc,FLAG          pass FLAG directly to the compiler

COMPILE-COMMAND is a command to be used in creating a \`standard' object file
from the given SOURCEFILE.

The output file name is determined by removing the directory component from
SOURCEFILE, then substituting the C source code suffix \`.c' with the
library object suffix, \`.lo'."
        }

      execute {
        $ECHO \
"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]...

Automatically set library path, then run a program.

This mode accepts the following additional options:

  -dlopen FILE      add the directory containing FILE to the library path

This mode sets the library path environment variable according to \`-dlopen'
flags.

If any of the ARGS are libtool executable wrappers, then they are translated
into their corresponding uninstalled binary, and any of their required library
directories are added to the library path.

Then, COMMAND is executed, with ARGS as arguments."
        }

      finish {
        $ECHO \
"Usage: $progname [OPTION]... --mode=finish [LIBDIR]...

Complete the installation of libtool libraries.

Each LIBDIR is a directory that contains libtool libraries.

The commands that this mode executes may require superuser privileges.  Use
the \`--dry-run' option if you just want to see what would be executed."
        }

      install {
        $ECHO \
"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND...

Install executables or libraries.

INSTALL-COMMAND is the installation command.  The first component should be
either the \`install' or \`cp' program.

The following components of INSTALL-COMMAND are treated specially:

  -inst-prefix-dir PREFIX-DIR  Use PREFIX-DIR as a staging area for installation

The rest of the components are interpreted as arguments to that command (only
BSD-compatible install options are recognized)."
        }

      link {
        $ECHO \
"Usage: $progname [OPTION]... --mode=link LINK-COMMAND...

Link object files or libraries together to form another library, or to
create an executable program.

LINK-COMMAND is a command using the C compiler that you would use to create
a program from several object files.

The following components of LINK-COMMAND are treated specially:

  -all-static       do not do any dynamic linking at all
  -avoid-version    do not add a version suffix if possible
  -bindir BINDIR    specify path to binaries directory (for systems where
                    libraries must be found in the PATH setting at runtime)
  -dlopen FILE      \`-dlpreopen' FILE if it cannot be dlopened at runtime
  -dlpreopen FILE   link in FILE and add its symbols to lt_preloaded_symbols
  -export-dynamic   allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
  -export-symbols SYMFILE
                    try to export only the symbols listed in SYMFILE
  -export-symbols-regex REGEX
                    try to export only the symbols matching REGEX
  -LLIBDIR          search LIBDIR for required installed libraries
  -lNAME            OUTPUT-FILE requires the installed library libNAME
  -module           build a library that can dlopened
  -no-fast-install  disable the fast-install mode
  -no-install       link a not-installable executable
  -no-undefined     declare that a library does not refer to external symbols
  -o OUTPUT-FILE    create OUTPUT-FILE from the specified objects
  -objectlist FILE  Use a list of object files found in FILE to specify objects
  -precious-files-regex REGEX
                    don't remove output files matching REGEX
  -release RELEASE  specify package release information
  -rpath LIBDIR     the created library will eventually be installed in LIBDIR
  -R[ ]LIBDIR       add LIBDIR to the runtime path of programs and libraries
  -shared           only do dynamic linking of libtool libraries
  -shrext SUFFIX    override the standard shared library file extension
  -static           do not do any dynamic linking of uninstalled libtool libraries
  -static-libtool-libs
                    do not do any dynamic linking of libtool libraries
  -version-info CURRENT[:REVISION[:AGE]]
                    specify library version info [each variable defaults to 0]
  -weak LIBNAME     declare that the target provides the LIBNAME interface
  -Wc,FLAG
  -Xcompiler FLAG   pass linker-specific FLAG directly to the compiler
  -Wl,FLAG
  -Xlinker FLAG     pass linker-specific FLAG directly to the linker
  -XCClinker FLAG   pass link-specific FLAG to the compiler driver (CC)

All other options (arguments beginning with \`-') are ignored.

Every other argument is treated as a filename.  Files ending in \`.la' are
treated as uninstalled libtool libraries, other files are standard or library
object files.

If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
only library objects (\`.lo' files) may be specified, and \`-rpath' is
required, except when creating a convenience library.

If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
using \`ar' and \`ranlib', or on Windows using \`lib'.

If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
is created, otherwise an executable program is created."
        }

      uninstall {
        $ECHO \
"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...

Remove libraries from an installation directory.

RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm').  RM-OPTIONS are options (such as \`-f') to be passed
to RM.

If FILE is a libtool library, all the files associated with it are deleted.
Otherwise, only FILE itself is deleted using RM."
        }

      * {
        func_fatal_help "invalid operation mode \`$opt_mode'"
        }
    }

    # Trailing pointer to the other modes, printed after every mode's text.
    echo
    $ECHO "Try \`$progname --help' for more information about other modes."
}

# Now that we've collected a possible --mode arg, show help if necessary
# $opt_help = ':' means plain --help; any other truthy value is presumably
# --help-all, which prints every mode's description -- TODO confirm against
# the option parser earlier in the file.
if $opt_help {
  if test $opt_help = : {
    func_mode_help
  } else {
    # First pass: collect just the Usage: lines from every mode's help into
    # a compact "Usage: ... / or: ..." summary.
    do {
      func_help noexit
      for opt_mode in compile link execute install finish uninstall clean {
	func_mode_help
      }
    } | sed -n '1p; 2,$s/^Usage:/  or: /p'
    # Second pass: print each mode's full description, with the generic
    # boilerplate filtered out by the sed program below (do not reformat it).
    do {
      func_help noexit
      for opt_mode in compile link execute install finish uninstall clean {
	echo
	func_mode_help
      }
    } |
    sed '1d
      /^When reporting/,/^Report/{
	H
	d
      }
      $x
      /information about other modes/d
      /more detailed .*MODE/d
      s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/'
  }
  exit $?
}


# func_mode_execute arg...
# Run $nonopt (the COMMAND) with the shared-library search path set up for the
# uninstalled libraries named by -dlopen, translating any libtool wrapper
# scripts among the args into their real binaries.  In dry-run mode the
# command line is printed instead and the proc exits.
proc func_mode_execute {
    $opt_debug
    # The first argument is the command name.
    setvar cmd = "$nonopt"
    test -z $cmd && \
      func_fatal_help "you must specify a COMMAND"

    # Handle -dlopen flags immediately.
    for file in $opt_dlopen {
      test -f $file \
	|| func_fatal_help "\`$file' is not a file"

      setvar dir = ''
      case (file) {
      *.la {
	func_resolve_sysroot $file
	setvar file = "$func_resolve_sysroot_result"

	# Check to see that this really is a libtool archive.
	# NOTE(review): the message interpolates $lib, not $file; this matches
	# upstream libtool but looks like an upstream typo -- confirm.
	func_lalib_unsafe_p $file \
	  || func_fatal_help "\`$lib' is not a valid libtool archive"

	# Read the libtool library.
	setvar dlname = ''
	setvar library_names = ''
	func_source $file

	# Skip this library if it cannot be dlopened.
	if test -z $dlname {
	  # Warn if it was a shared library.
	  test -n $library_names && \
	    func_warning "\`$file' was not linked with \`-export-dynamic'"
	  continue
	}

	func_dirname $file "" "."
	setvar dir = "$func_dirname_result"

	if test -f "$dir/$objdir/$dlname" {
	  func_append dir "/$objdir"
	} else {
	  if test ! -f "$dir/$dlname" {
	    func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'"
	  }
	}
	}

      *.lo {
	# Just add the directory containing the .lo file.
	func_dirname $file "" "."
	setvar dir = "$func_dirname_result"
	}

      * {
	func_warning "\`-dlopen' is ignored for non-libtool libraries and objects"
	continue
	}
      }

      # Get the absolute pathname.
      setvar absdir = $(cd $dir && pwd)
      test -n $absdir && setvar dir = "$absdir"

      # Now add the directory to shlibpath_var.
      if eval "test -z \"\$$shlibpath_var\"" {
	eval "$shlibpath_var=\"\$dir\""
      } else {
	eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
      }
    }

    # This variable tells wrapper scripts just to set shlibpath_var
    # rather than running their programs.
    setvar libtool_execute_magic = "$magic"

    # Check if any of the arguments is a wrapper script.
    # FIX: the translator fused the 'setvar' and the 'for' header onto one
    # line ("setvar args = ''for file in @ARGV {"); they are two statements.
    setvar args = ''
    for file in @ARGV {
      case (file) {
      -* | *.la | *.lo  { }
      * {
	# Do a test to see if this is really a libtool program.
	if func_ltwrapper_script_p $file {
	  func_source $file
	  # Transform arg to wrapped name.
	  setvar file = ""$progdir/$program""
	} elif func_ltwrapper_executable_p $file {
	  func_ltwrapper_scriptname $file
	  func_source $func_ltwrapper_scriptname_result
	  # Transform arg to wrapped name.
	  setvar file = ""$progdir/$program""
	}
	}
      }
      # Quote arguments (to preserve shell metacharacters).
      func_append_quoted args $file
    }

    if test "X$opt_dry_run" = Xfalse {
      if test -n $shlibpath_var {
	# Export the shlibpath_var.
	eval "export $shlibpath_var"
      }

      # Restore saved environment variables
      for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
      {
	eval "if test \"\${save_$lt_var+set}\" = set; then
                $lt_var=\$save_$lt_var; export $lt_var
	      else
		$lt_unset $lt_var
	      fi"
      }

      # Now prepare to actually exec the command.
      setvar exec_cmd = ""\$cmd$args""
    } else {
      # Display what would be done.
      if test -n $shlibpath_var {
	eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\""
	echo "export $shlibpath_var"
      }
      $ECHO "$cmd$args"
      exit $EXIT_SUCCESS
    }
}

# Dispatch: run execute mode now that its proc is defined.
test $opt_mode = execute && func_mode_execute ${1+"$@"}


# func_mode_finish arg...
# Complete the installation of libtool libraries: strip sysroot references
# from installed .la files, run the platform's finish_cmds/finish_eval for
# each LIBDIR argument, then print post-install advice (unless silent).
# Left byte-identical: the eval/sed quoting below is order-sensitive.
proc func_mode_finish {
    $opt_debug
    setvar libs = ''
    setvar libdirs = ''
    setvar admincmds = ''

    # Sort arguments into directories (libdirs) and .la files (libs).
    for opt in "$nonopt" ${1+"$@"}
    {
      if test -d $opt {
	func_append libdirs " $opt"

      } elif test -f $opt {
	if func_lalib_unsafe_p $opt {
	  func_append libs " $opt"
	} else {
	  func_warning "\`$opt' is not a valid libtool archive"
	}

      } else {
	func_fatal_error "invalid argument \`$opt'"
      }
    }

    if test -n $libs {
      # Build a sed command that strips the sysroot prefix, if one is set.
      if test -n $lt_sysroot {
        setvar sysroot_regex = $($ECHO $lt_sysroot | $SED $sed_make_literal_regex)
        setvar sysroot_cmd = ""s/\([ ']\)$sysroot_regex/\1/g;""
      } else {
        setvar sysroot_cmd = ''
      }

      # Remove sysroot references
      if $opt_dry_run {
        for lib in $libs {
          echo "removing references to $lt_sysroot and \`=' prefixes from $lib"
        }
      } else {
        # Rewrite each .la in a temp dir, then move it back into place.
        setvar tmpdir = $(func_mktempdir)
        for lib in $libs {
	  sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \
	    > $tmpdir/tmp-la
	  mv -f $tmpdir/tmp-la $lib
	}
        ${RM}r $tmpdir
      }
    }

    if test -n "$finish_cmds$finish_eval" && test -n $libdirs {
      for libdir in $libdirs {
	if test -n $finish_cmds {
	  # Do each command in the finish commands.
	  func_execute_cmds $finish_cmds 'admincmds="$admincmds
'"$cmd"'"'
	}
	if test -n $finish_eval {
	  # Do the single finish_eval.
	  eval cmds='"'$finish_eval'"'
	  $opt_dry_run || eval $cmds || func_append admincmds "
       $cmds"
	}
      }
    }

    # Exit here if they wanted silent mode.
    $opt_silent && exit $EXIT_SUCCESS

    if test -n "$finish_cmds$finish_eval" && test -n $libdirs {
      echo "----------------------------------------------------------------------"
      echo "Libraries have been installed in:"
      for libdir in $libdirs {
	$ECHO "   $libdir"
      }
      echo
      echo "If you ever happen to want to link against installed libraries"
      echo "in a given directory, LIBDIR, you must either use libtool, and"
      echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
      echo "flag during linking and do at least one of the following:"
      if test -n $shlibpath_var {
	echo "   - add LIBDIR to the \`$shlibpath_var' environment variable"
	echo "     during execution"
      }
      if test -n $runpath_var {
	echo "   - add LIBDIR to the \`$runpath_var' environment variable"
	echo "     during linking"
      }
      if test -n $hardcode_libdir_flag_spec {
	setvar libdir = 'LIBDIR'
	eval flag='"'$hardcode_libdir_flag_spec'"'

	$ECHO "   - use the \`$flag' linker flag"
      }
      if test -n $admincmds {
	$ECHO "   - have your system administrator run these commands:$admincmds"
      }
      if test -f /etc/ld.so.conf {
	echo "   - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
      }
      echo

      echo "See any operating system documentation about shared libraries for"
      case (host) {
	solaris2.[6789]|solaris2.1[0-9] {
	  echo "more information, such as the ld(1), crle(1) and ld.so(8) manual"
	  echo "pages."
	  }
	* {
	  echo "more information, such as the ld(1) and ld.so(8) manual pages."
	  }
      }
      echo "----------------------------------------------------------------------"
    }
    exit $EXIT_SUCCESS
}

# Dispatch: run finish mode now that its proc is defined.
test $opt_mode = finish && func_mode_finish ${1+"$@"}


# func_mode_install arg...
# Install files using the user-supplied INSTALL-COMMAND.  Handles static
# libraries, libtool archives (.la), libtool objects (.lo), and programs
# (including libtool wrapper scripts, which are relinked if needed).
# FIXES applied to translator artifacts: fused setvar/for lines split
# (no_mode loop, linkname loop), dropped 'case' subjects restored, and
# dropped closing quotes in several $SED command substitutions restored.
proc func_mode_install {
    $opt_debug
    # There may be an optional sh(1) argument at the beginning of
    # install_prog (especially on Windows NT).
    if test $nonopt = $SHELL || test $nonopt = /bin/sh ||
       # Allow the use of GNU shtool's install command.
       case (nonopt) { *shtool* { :} * { false} } {
      # Aesthetically quote it.
      func_quote_for_eval $nonopt
      setvar install_prog = ""$func_quote_for_eval_result ""
      setvar arg = "$1"
      shift
    } else {
      setvar install_prog = ''
      setvar arg = "$nonopt"
    }

    # The real first argument should be the name of the installation program.
    # Aesthetically quote it.
    func_quote_for_eval $arg
    func_append install_prog $func_quote_for_eval_result
    setvar install_shared_prog = "$install_prog"
    # FIX: translator dropped the case subject; upstream libtool switches on
    # $install_prog here to detect a plain 'cp' installer.
    case (install_prog) {
      *[\\\ /]cp\ * { setvar install_cp = ':' }
      * { setvar install_cp = 'false' }
    }

    # We need to accept at least all the BSD install flags.
    setvar dest = ''
    setvar files = ''
    setvar opts = ''
    setvar prev = ''
    setvar install_type = ''
    setvar isdir = 'no'
    setvar stripme = ''
    # FIX: 'setvar' and the 'for' header were fused onto one line by the
    # translation; they are two statements.
    setvar no_mode = ':'
    for arg in @ARGV {
      setvar arg2 = ''
      if test -n $dest {
	func_append files " $dest"
	setvar dest = "$arg"
	continue
      }

      case (arg) {
      -d { setvar isdir = 'yes' }
      -f {
	if $install_cp { :; } else {
	  setvar prev = "$arg"
	}
	}
      -g | -m | -o {
	setvar prev = "$arg"
	}
      -s {
	setvar stripme = "" -s""
	continue
	}
      -* {
	}
      * {
	# If the previous option needed an argument, then skip it.
	if test -n $prev {
	  if test "x$prev" = x-m && test -n $install_override_mode {
	    setvar arg2 = "$install_override_mode"
	    setvar no_mode = 'false'
	  }
	  setvar prev = ''
	} else {
	  setvar dest = "$arg"
	  continue
	}
	}
      }

      # Aesthetically quote the argument.
      func_quote_for_eval $arg
      func_append install_prog " $func_quote_for_eval_result"
      if test -n $arg2 {
	func_quote_for_eval $arg2
      }
      func_append install_shared_prog " $func_quote_for_eval_result"
    }

    test -z $install_prog && \
      func_fatal_help "you must specify an install program"

    test -n $prev && \
      func_fatal_help "the \`$prev' option requires an argument"

    if test -n $install_override_mode && $no_mode {
      if $install_cp { :; } else {
	func_quote_for_eval $install_override_mode
	func_append install_shared_prog " -m $func_quote_for_eval_result"
      }
    }

    if test -z $files {
      if test -z $dest {
	func_fatal_help "no file or destination specified"
      } else {
	func_fatal_help "you must specify a destination"
      }
    }

    # Strip any trailing slash from the destination.
    func_stripname '' '/' $dest
    setvar dest = "$func_stripname_result"

    # Check to see that the destination is a directory.
    test -d $dest && setvar isdir = 'yes'
    if test $isdir = yes {
      setvar destdir = "$dest"
      setvar destname = ''
    } else {
      func_dirname_and_basename $dest "" "."
      setvar destdir = "$func_dirname_result"
      setvar destname = "$func_basename_result"

      # Not a directory, so check to see that there is only one file specified.
      set dummy $files; shift
      test "$Argc" -gt 1 && \
	func_fatal_help "\`$dest' is not a directory"
    }
    case (destdir) {
    [\\/]* | [A-Za-z]:[\\/]* { }
    * {
      # Relative destdir is only allowed for .lo files.
      for file in $files {
	case (file) {
	*.lo { }
	* {
	  func_fatal_help "\`$destdir' must be an absolute directory name"
	  }
	}
      }
      }
    }

    # This variable tells wrapper scripts just to set variables rather
    # than running their programs.
    setvar libtool_install_magic = "$magic"

    setvar staticlibs = ''
    setvar future_libdirs = ''
    setvar current_libdirs = ''
    for file in $files {

      # Do each installation.
      case (file) {
      *.$libext {
	# Do the static libraries later.
	func_append staticlibs " $file"
	}

      *.la {
	func_resolve_sysroot $file
	setvar file = "$func_resolve_sysroot_result"

	# Check to see that this really is a libtool archive.
	func_lalib_unsafe_p $file \
	  || func_fatal_help "\`$file' is not a valid libtool archive"

	setvar library_names = ''
	setvar old_library = ''
	setvar relink_command = ''
	func_source $file

	# Add the libdir to current_libdirs if it is the destination.
	if test "X$destdir" = "X$libdir" {
	  # FIX: translator dropped the case subject; upstream matches against
	  # "$current_libdirs " (note trailing space) -- TODO confirm.
	  case ("$current_libdirs ") {
	  *" $libdir "* { }
	  * { func_append current_libdirs " $libdir" }
	  }
	} else {
	  # Note the libdir as a future libdir.
	  # FIX: subject restored as above, from "$future_libdirs ".
	  case ("$future_libdirs ") {
	  *" $libdir "* { }
	  * { func_append future_libdirs " $libdir" }
	  }
	}

	func_dirname $file "/" ""
	setvar dir = "$func_dirname_result"
	func_append dir $objdir

	if test -n $relink_command {
	  # Determine the prefix the user has applied to our future dir.
	  # FIX: closing quote of the sed program was dropped in translation.
	  setvar inst_prefix_dir = $($ECHO $destdir | $SED -e "s%$libdir\$%%")

	  # Don't allow the user to place us outside of our expected
	  # location b/c this prevents finding dependent libraries that
	  # are installed to the same prefix.
	  # At present, this check doesn't affect windows .dll's that
	  # are installed into $libdir/../bin (currently, that works fine)
	  # but it's something to keep an eye on.
	  test $inst_prefix_dir = $destdir && \
	    func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir"

	  if test -n $inst_prefix_dir {
	    # Stick the inst_prefix_dir data into the link command.
	    # FIX: closing quote of the sed program restored.
	    setvar relink_command = $($ECHO $relink_command | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%")
	  } else {
	    # FIX: closing quote of the sed program restored.
	    setvar relink_command = $($ECHO $relink_command | $SED "s%@inst_prefix_dir@%%")
	  }

	  func_warning "relinking \`$file'"
	  func_show_eval $relink_command \
	    'func_fatal_error "error: relink \`$file''''' with the above command before installing it"'
	}

	# See the names of the shared library.
	set dummy $library_names; shift
	if test -n $1 {
	  setvar realname = "$1"
	  shift

	  setvar srcname = "$realname"
	  test -n $relink_command && setvar srcname = ""$realname"T"

	  # Install the shared library and build the symlinks.
	  func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \
	      'exit $?'
	  setvar tstripme = "$stripme"
	  case (host_os) {
	  cygwin* | mingw* | pw32* | cegcc* {
	    case (realname) {
	    *.dll.a {
	      setvar tstripme = """"
	      }
	    }
	    }
	  }
	  if test -n $tstripme && test -n $striplib {
	    func_show_eval "$striplib $destdir/$realname" 'exit $?'
	  }

	  # FIX: 'if' and the 'for' header were fused onto one line by the
	  # translation; they are separate statements.
	  if test "$Argc" -gt 0 {
	    for linkname in @ARGV {
	      test $linkname != $realname \
		&& func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })"
	    }
	  }

	  # Do each command in the postinstall commands.
	  setvar lib = ""$destdir/$realname""
	  func_execute_cmds $postinstall_cmds 'exit $?'
	}

	# Install the pseudo-library for information purposes.
	func_basename $file
	setvar name = "$func_basename_result"
	setvar instname = ""$dir/$name"i"
	func_show_eval "$install_prog $instname $destdir/$name" 'exit $?'

	# Maybe install the static library, too.
	test -n $old_library && func_append staticlibs " $dir/$old_library"
	}

      *.lo {
	# Install (i.e. copy) a libtool object.

	# Figure out destination file name, if it wasn't already specified.
	if test -n $destname {
	  setvar destfile = ""$destdir/$destname""
	} else {
	  func_basename $file
	  setvar destfile = "$func_basename_result"
	  setvar destfile = ""$destdir/$destfile""
	}

	# Deduce the name of the destination old-style object file.
	case (destfile) {
	*.lo {
	  func_lo2o $destfile
	  setvar staticdest = "$func_lo2o_result"
	  }
	*.$objext {
	  setvar staticdest = "$destfile"
	  setvar destfile = ''
	  }
	* {
	  func_fatal_help "cannot copy a libtool object to \`$destfile'"
	  }
	}

	# Install the libtool object if requested.
	test -n $destfile && \
	  func_show_eval "$install_prog $file $destfile" 'exit $?'

	# Install the old object if enabled.
	if test $build_old_libs = yes {
	  # Deduce the name of the old-style object file.
	  func_lo2o $file
	  setvar staticobj = "$func_lo2o_result"
	  func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?'
	}
	exit $EXIT_SUCCESS
	}

      * {
	# Figure out destination file name, if it wasn't already specified.
	if test -n $destname {
	  setvar destfile = ""$destdir/$destname""
	} else {
	  func_basename $file
	  setvar destfile = "$func_basename_result"
	  setvar destfile = ""$destdir/$destfile""
	}

	# If the file is missing, and there is a .exe on the end, strip it
	# because it is most likely a libtool script we actually want to
	# install
	setvar stripped_ext = """"
	case (file) {
	  *.exe {
	    if test ! -f $file {
	      func_stripname '' '.exe' $file
	      setvar file = "$func_stripname_result"
	      setvar stripped_ext = "".exe""
	    }
	    }
	}

	# Do a test to see if this is really a libtool program.
	case (host) {
	*cygwin* | *mingw* {
	    if func_ltwrapper_executable_p $file {
	      func_ltwrapper_scriptname $file
	      setvar wrapper = "$func_ltwrapper_scriptname_result"
	    } else {
	      func_stripname '' '.exe' $file
	      setvar wrapper = "$func_stripname_result"
	    }
	    }
	* {
	    setvar wrapper = "$file"
	    }
	}
	if func_ltwrapper_script_p $wrapper {
	  setvar notinst_deplibs = ''
	  setvar relink_command = ''

	  func_source $wrapper

	  # Check the variables that should have been set.
	  test -z $generated_by_libtool_version && \
	    func_fatal_error "invalid libtool wrapper script \`$wrapper'"

	  setvar finalize = 'yes'
	  for lib in $notinst_deplibs {
	    # Check to see that each library is installed.
	    setvar libdir = ''
	    if test -f $lib {
	      func_source $lib
	    }
	    # FIX: closing quote of the sed program restored.
	    setvar libfile = ""$libdir/"$($ECHO $lib | $SED 's%^.*/%%g')" ### testsuite: skip nested quoting test
	    if test -n $libdir && test ! -f $libfile {
	      func_warning "\`$lib' has not been installed in \`$libdir'"
	      setvar finalize = 'no'
	    }
	  }

	  setvar relink_command = ''
	  func_source $wrapper

	  setvar outputname = ''
	  if test $fast_install = no && test -n $relink_command {
	    $opt_dry_run || do {
	      if test $finalize = yes {
	        setvar tmpdir = $(func_mktempdir)
		func_basename "$file$stripped_ext"
		setvar file = "$func_basename_result"
	        setvar outputname = ""$tmpdir/$file""
	        # Replace the output file specification.
	        # FIX: closing quote of the sed program restored.
	        setvar relink_command = $($ECHO $relink_command | $SED 's%@OUTPUT@%'"$outputname"'%g')

	        $opt_silent || do {
	          func_quote_for_expand $relink_command
		  eval "func_echo $func_quote_for_expand_result"
	        }
	        if eval $relink_command { :
	          } else {
		  func_error "error: relink \`$file' with the above command before installing it"
		  $opt_dry_run || ${RM}r $tmpdir
		  continue
	        }
	        setvar file = "$outputname"
	      } else {
	        func_warning "cannot relink \`$file'"
	      }
	    }
	  } else {
	    # Install the binary that we compiled earlier.
	    # FIX: closing quote of the sed program restored.
	    setvar file = $($ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%")
	  }
	}

	# remove .exe since cygwin /usr/bin/install will append another
	# one anyway
	case (install_prog) {
	*/usr/bin/install*,*cygwin* {
	  case (file) {
	  *.exe:*.exe {
	    # this is ok
	    }
	  *.exe:* {
	    setvar destfile = "$destfile.exe"
	    }
	  *:*.exe {
	    func_stripname '' '.exe' $destfile
	    setvar destfile = "$func_stripname_result"
	    }
	  }
	  }
	}
	func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?'
	$opt_dry_run || if test -n $outputname {
	  ${RM}r $tmpdir
	}
	}
      }
    }

    # Install the deferred static libraries and run ranlib-style commands.
    for file in $staticlibs {
      func_basename $file
      setvar name = "$func_basename_result"

      # Set up the ranlib parameters.
      setvar oldlib = ""$destdir/$name""
      func_to_tool_file $oldlib func_convert_file_msys_to_w32
      setvar tool_oldlib = "$func_to_tool_file_result"

      func_show_eval "$install_prog \$file \$oldlib" 'exit $?'

      if test -n $stripme && test -n $old_striplib {
	func_show_eval "$old_striplib $tool_oldlib" 'exit $?'
      }

      # Do each command in the postinstall commands.
      func_execute_cmds $old_postinstall_cmds 'exit $?'
    }

    test -n $future_libdirs && \
      func_warning "remember to run \`$progname --finish$future_libdirs'"

    if test -n $current_libdirs {
      # Maybe just do a dry run.
      $opt_dry_run && setvar current_libdirs = "" -n$current_libdirs""
      setvar exec_cmd = ''$SHELL $progpath $preserve_args --finish$current_libdirs''
    } else {
      exit $EXIT_SUCCESS
    }
}

# Dispatch: run install mode now that its proc is defined.
test $opt_mode = install && func_mode_install ${1+"$@"}


# func_generate_dlsyms outputname originator pic_p
# Extract symbols from dlprefiles and create ${outputname}S.o with
# a dlpreopen symbol table.
proc func_generate_dlsyms {
    $opt_debug
    setvar my_outputname = "$1"
    setvar my_originator = "$2"
    setvar my_pic_p = "${3-no}"
    setvar my_prefix = $($ECHO $my_originator | sed 's%[^a-zA-Z0-9]%_%g)
    setvar my_dlsyms = ''

    if test -n "$dlfiles$dlprefiles" || test $dlself != no {
      if test -n $NM && test -n $global_symbol_pipe {
	setvar my_dlsyms = ""${my_outputname}S.c""
      } else {
	func_error "not configured to extract global symbols from dlpreopened files"
      }
    }

    if test -n $my_dlsyms {
      case (my_dlsyms) {
      "" { }
      *.c {
	# Discover the nlist of each of the dlfiles.
	setvar nlist = ""$output_objdir/${my_outputname}.nm""

	func_show_eval "$RM $nlist ${nlist}S ${nlist}T"

	# Parse the name list into a source file.
	func_verbose "creating $output_objdir/$my_dlsyms"

	$opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\
/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */
/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */

#ifdef __cplusplus
extern \"C\" {
#endif

#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4))
#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
#endif

/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
/* DATA imports from DLLs on WIN32 con't be const, because runtime
   relocations are performed -- see ld's documentation on pseudo-relocs.  */
# define LT_DLSYM_CONST
#elif defined(__osf__)
/* This system does not cope well with relocations in const data.  */
# define LT_DLSYM_CONST
#else
# define LT_DLSYM_CONST const
#endif

/* External symbol declarations for the compiler. */> "$output_objdir/$my_dlsyms" "\
/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */
/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */

#ifdef __cplusplus
extern \"C\" {
#endif

#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4))
#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
#endif

/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
/* DATA imports from DLLs on WIN32 con't be const, because runtime
   relocations are performed -- see ld's documentation on pseudo-relocs.  */
# define LT_DLSYM_CONST
#elif defined(__osf__)
/* This system does not cope well with relocations in const data.  */
# define LT_DLSYM_CONST
#else
# define LT_DLSYM_CONST const
#endif

/* External symbol declarations for the compiler. */\
"

	if test $dlself = yes {
	  func_verbose "generating symbol list for \`$output'"

	  $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist"

	  # Add our own program objects to the symbol list.
	  setvar progfiles = $($ECHO "$objs$old_deplibs" | $SP2NL | $SED $lo2o | $NL2SP)
	  for progfile in $progfiles {
	    func_to_tool_file $progfile func_convert_file_msys_to_w32
	    func_verbose "extracting global C symbols from \`$func_to_tool_file_result'"
	    $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'"
	  }

	  if test -n $exclude_expsyms {
	    $opt_dry_run || do {
	      eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
	      eval '$MV "$nlist"T "$nlist"'
	    }
	  }

	  if test -n $export_symbols_regex {
	    $opt_dry_run || do {
	      eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
	      eval '$MV "$nlist"T "$nlist"'
	    }
	  }

	  # Prepare the list of exported symbols
	  if test -z $export_symbols {
	    setvar export_symbols = ""$output_objdir/$outputname.exp""
	    $opt_dry_run || do {
	      $RM $export_symbols
	      eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
	      case (host) {
	      *cygwin* | *mingw* | *cegcc*  {
                eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
                eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"'
	        }
	      }
	    }
	  } else {
	    $opt_dry_run || do {
	      eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"'
	      eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
	      eval '$MV "$nlist"T "$nlist"'
	      case (host) {
	        *cygwin* | *mingw* | *cegcc*  {
	          eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
	          eval 'cat "$nlist" >> "$output_objdir/$outputname.def"'
	          }
	      }
	    }
	  }
	}

	for dlprefile in $dlprefiles {
	  func_verbose "extracting global C symbols from \`$dlprefile'"
	  func_basename $dlprefile
	  setvar name = "$func_basename_result"
          case (host) {
	    *cygwin* | *mingw* | *cegcc*  {
	      # if an import library, we need to obtain dlname
	      if func_win32_import_lib_p $dlprefile {
	        func_tr_sh $dlprefile
	        eval "curr_lafile=\$libfile_$func_tr_sh_result"
	        setvar dlprefile_dlbasename = """"
	        if test -n $curr_lafile && func_lalib_p $curr_lafile {
	          # Use subshell, to avoid clobbering current variable values
	          setvar dlprefile_dlname = $(source $curr_lafile && echo $dlname)
	          if test -n $dlprefile_dlname  {
	            func_basename $dlprefile_dlname
	            setvar dlprefile_dlbasename = "$func_basename_result"
	          } else {
	            # no lafile. user explicitly requested -dlpreopen <import library>.
	            $sharedlib_from_linklib_cmd $dlprefile
	            setvar dlprefile_dlbasename = "$sharedlib_from_linklib_result"
	          }
	        }
	        $opt_dry_run || do {
	          if test -n $dlprefile_dlbasename  {
	            eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"'
	          } else {
	            func_warning "Could not compute DLL name from $name"
	            eval '$ECHO ": $name " >> "$nlist"'
	          }
	          func_to_tool_file $dlprefile func_convert_file_msys_to_w32
	          eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe |
	            $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'"
	        }
	      } else { # not an import lib
	        $opt_dry_run || do {
	          eval '$ECHO ": $name " >> "$nlist"'
	          func_to_tool_file $dlprefile func_convert_file_msys_to_w32
	          eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
	        }
	      }
	    }
	    * {
	      $opt_dry_run || do {
	        eval '$ECHO ": $name " >> "$nlist"'
	        func_to_tool_file $dlprefile func_convert_file_msys_to_w32
	        eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
	      }
	    }
          }
	}

	$opt_dry_run || do {
	  # Make sure we have at least an empty file.
	  test -f $nlist || : > "$nlist"

	  if test -n $exclude_expsyms {
	    $EGREP -v " ($exclude_expsyms)$" $nlist > "$nlist"T
	    $MV "$nlist"T $nlist
	  }

	  # Try sorting and uniquifying the output.
	  if $GREP -v "^: " < "$nlist" |
	      if sort -k 3 </dev/null >/dev/null 2>&1 {
		sort -k 3
	      } else {
		sort +2
	      } |
	      uniq > "$nlist"S {
	    :
	  } else {
	    $GREP -v "^: " < "$nlist" > "$nlist"S
	  }

	  if test -f "$nlist"S {
	    eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"'
	  } else {
	    echo '/* NONE */' >> "$output_objdir/$my_dlsyms"
	  }

	  echo >> "$output_objdir/$my_dlsyms" "\

/* The mapping between symbol names and symbols.  */
typedef struct {
  const char *name;
  void *address;
} lt_dlsymlist;
extern LT_DLSYM_CONST lt_dlsymlist
lt_${my_prefix}_LTX_preloaded_symbols[];
LT_DLSYM_CONST lt_dlsymlist
lt_${my_prefix}_LTX_preloaded_symbols[] =
{\
  { \"$my_originator\", (void *) 0 },>> "$output_objdir/$my_dlsyms" "\

/* The mapping between symbol names and symbols.  */
typedef struct {
  const char *name;
  void *address;
} lt_dlsymlist;
extern LT_DLSYM_CONST lt_dlsymlist
lt_${my_prefix}_LTX_preloaded_symbols[];
LT_DLSYM_CONST lt_dlsymlist
lt_${my_prefix}_LTX_preloaded_symbols[] =
{\
  { \"$my_originator\", (void *) 0 },"

	  case (need_lib_prefix) {
	  no {
	    eval $global_symbol_to_c_name_address < "$nlist" >> "$output_objdir/$my_dlsyms"
	    }
	  * {
	    eval $global_symbol_to_c_name_address_lib_prefix < "$nlist" >> "$output_objdir/$my_dlsyms"
	    }
	  }
	  echo >> "$output_objdir/$my_dlsyms" "\
  {0, (void *) 0}
};

/* This works around a problem in FreeBSD linker */
#ifdef FREEBSD_WORKAROUND
static const void *lt_preloaded_setup() {
  return lt_${my_prefix}_LTX_preloaded_symbols;
}
#endif

#ifdef __cplusplus
}
#endif>> "$output_objdir/$my_dlsyms" "\
  {0, (void *) 0}
};

/* This works around a problem in FreeBSD linker */
#ifdef FREEBSD_WORKAROUND
static const void *lt_preloaded_setup() {
  return lt_${my_prefix}_LTX_preloaded_symbols;
}
#endif

#ifdef __cplusplus
}
#endif\
"
	} # !$opt_dry_run

	setvar pic_flag_for_symtable = ''
	case{
	*" -static "* { }
	* {
	  case (host) {
	  # compiling the symbol table file with pic_flag works around
	  # a FreeBSD bug that causes programs to crash when -lm is
	  # linked before any other PIC object.  But we must not use
	  # pic_flag when linking with -static.  The problem exists in
	  # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
	  *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0* {
	    setvar pic_flag_for_symtable = "" $pic_flag -DFREEBSD_WORKAROUND"" }
	  *-*-hpux* {
	    setvar pic_flag_for_symtable = "" $pic_flag""  }
	  * {
	    if test "X$my_pic_p" != Xno {
	      setvar pic_flag_for_symtable = "" $pic_flag""
	    }
	    }
	  }
	  }
	}
	setvar symtab_cflags = ''
	for arg in $LTCFLAGS {
	  case (arg) {
	  -pie | -fpie | -fPIE { }
	  * { func_append symtab_cflags " $arg" }
	  }
	}

	# Now compile the dynamic symbol file.
	func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?'

	# Clean up the generated files.
	func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"'

	# Transform the symbol file into the correct name.
	setvar symfileobj = ""$output_objdir/${my_outputname}S.$objext""
	case (host) {
	*cygwin* | *mingw* | *cegcc*  {
	  if test -f "$output_objdir/$my_outputname.def" {
	    setvar compile_command = $($ECHO $compile_command | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%)
	    setvar finalize_command = $($ECHO $finalize_command | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%)
	  } else {
	    setvar compile_command = $($ECHO $compile_command | $SED "s%@SYMFILE@%$symfileobj%)
	    setvar finalize_command = $($ECHO $finalize_command | $SED "s%@SYMFILE@%$symfileobj%)
	  }
	  }
	* {
	  setvar compile_command = $($ECHO $compile_command | $SED "s%@SYMFILE@%$symfileobj%)
	  setvar finalize_command = $($ECHO $finalize_command | $SED "s%@SYMFILE@%$symfileobj%)
	  }
	}
	}
      * {
	func_fatal_error "unknown suffix for \`$my_dlsyms'"
	}
      }
    } else {
      # We keep going just in case the user didn't refer to
      # lt_preloaded_symbols.  The linker will fail if global_symbol_pipe
      # really was required.

      # Nullify the symbol file.
      setvar compile_command = $($ECHO $compile_command | $SED "s% @SYMFILE@%%)
      setvar finalize_command = $($ECHO $finalize_command | $SED "s% @SYMFILE@%%)
    }
}

# func_win32_libid arg
# return the library type of file 'arg'
#
# Need a lot of goo to handle *both* DLLs and import libs
# Has to be a shell function in order to 'eat' the argument
# that is supplied when $file_magic_command is called.
# Despite the name, also deal with 64 bit binaries.
proc func_win32_libid {
  $opt_debug
  # Classify the Win32 file named by $1 using file(1) output; prints one of
  # "x86 archive import", "x86 archive static", "x86 DLL", or "unknown".
  setvar win32_libid_type = ""unknown""
  setvar win32_fileres = $(file -L $1 )
  case (win32_fileres) {
  *ar\ archive\ import\ library* { # definitely import
    setvar win32_libid_type = ""x86 archive import""
    }
  *ar\ archive* { # could be an import, or static
    # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD.
    if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null |
       $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null {
      func_to_tool_file $1 func_convert_file_msys_to_w32
      # Scan the first 100 symbols; any ' I ' (import) entry marks an implib.
      # NOTE(review): restored the closing quote of the sed script, which the
      # mechanical translation had dropped before the closing paren.
      setvar win32_nmres = $(eval $NM -f posix -A '"'$func_to_tool_file_result'"' |
	$SED -n -e '
	    1,100{
		/ I /{
		    s,.*,import,
		    p
		    q
		}
	    }')
      case (win32_nmres) {
      import* {  setvar win32_libid_type = ""x86 archive import""}
      * {        setvar win32_libid_type = ""x86 archive static""}
      }
    }
    }
  *DLL* {
    setvar win32_libid_type = ""x86 DLL""
    }
  *executable* { # but shell scripts are "executable" too...
    case (win32_fileres) {
    *MS\ Windows\ PE\ Intel* {
      setvar win32_libid_type = ""x86 DLL""
      }
    }
    }
  }
  # Result goes to stdout; callers capture it via $file_magic_command.
  $ECHO $win32_libid_type
}

# func_cygming_dll_for_implib ARG
#
# Platform-specific function to extract the
# name of the DLL associated with the specified
# import library ARG.
# Invoked by eval'ing the libtool variable
#    $sharedlib_from_linklib_cmd
# Result is available in the variable
#    $sharedlib_from_linklib_result
proc func_cygming_dll_for_implib {
  $opt_debug
  # Ask $DLLTOOL for the DLL name recorded in import library $1.
  # --identify-strict makes dlltool fail rather than guess when the implib
  # references more than one DLL; the name is left in
  # $sharedlib_from_linklib_result for the caller.
  setvar sharedlib_from_linklib_result = $($DLLTOOL --identify-strict --identify $1)
}

# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs
#
# The is the core of a fallback implementation of a
# platform-specific function to extract the name of the
# DLL associated with the specified import library LIBNAME.
#
# SECTION_NAME is either .idata$6 or .idata$7, depending
# on the platform and compiler that created the implib.
#
# Echos the name of the DLL associated with the
# specified import library.
proc func_cygming_dll_for_implib_fallback_core {
  $opt_debug
  # Escape $1 (the section name, e.g. '.idata$7') so it can be embedded
  # literally inside the sed address below.
  setvar match_literal = $($ECHO $1 | $SED $sed_make_literal_regex)
  # Pipeline: dump section $1 of implib $2, mark each member's dllname
  # section, strip objdump's address/hex columns, join each marked region
  # into one line, then print the first plausible DLL name.
  $OBJDUMP -s --section $1 $2 2>/dev/null |
    $SED '/^Contents of section '"$match_literal"':/{
      # Place marker at beginning of archive member dllname section
      s/.*/====MARK====/
      p
      d
    }
    # These lines can sometimes be longer than 43 characters, but
    # are always uninteresting
    /:[	 ]*file format pe[i]\{,1\}-/d
    /^In archive [^:]*:/d
    # Ensure marker is printed
    /^====MARK====/p
    # Remove all lines with less than 43 characters
    /^.\{43\}/!d
    # From remaining lines, remove first 43 characters
    s/^.\{43\}//' |
    $SED -n '
      # Join marker and all lines until next marker into a single line
      /^====MARK====/ b para
      H
      $ b para
      b
      :para
      x
      s/\n//g
      # Remove the marker
      s/^====MARK====//
      # Remove trailing dots and whitespace
      s/[\. \t]*$//
      # Print
      /./p' |
    # we now have a list, one entry per line, of the stringified
    # contents of the appropriate section of all members of the
    # archive which possess that section. Heuristic: eliminate
    # all those which have a first or second character that is
    # a '.' (that is, objdump's representation of an unprintable
    # character.) This should work for all archives with less than
    # 0x302f exports -- but will fail for DLLs whose name actually
    # begins with a literal '.' or a single character followed by
    # a '.'.
    #
    # Of those that remain, print the first one.
    $SED -e '/^\./d;/^.\./d;q'
}

# func_cygming_gnu_implib_p ARG
# This predicate returns with zero status (TRUE) if
# ARG is a GNU/binutils-style import library. Returns
# with nonzero status (FALSE) otherwise.
proc func_cygming_gnu_implib_p {
  $opt_debug
  func_to_tool_file $1 func_convert_file_msys_to_w32
  # GNU/binutils-style import libraries export bookkeeping symbols of the
  # form _head_<lib> or <lib>_iname; grep the symbol listing for them.
  # NOTE(review): restored the closing quote of the $EGREP pattern, which
  # the mechanical translation had dropped before the closing paren.
  setvar func_cygming_gnu_implib_tmp = $($NM $func_to_tool_file_result | eval $global_symbol_pipe | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$')
  # Zero (TRUE) exit status iff a matching symbol was found.
  test -n $func_cygming_gnu_implib_tmp
}

# func_cygming_ms_implib_p ARG
# This predicate returns with zero status (TRUE) if
# ARG is an MS-style import library. Returns
# with nonzero status (FALSE) otherwise.
proc func_cygming_ms_implib_p {
  $opt_debug
  func_to_tool_file $1 func_convert_file_msys_to_w32
  # MS-style import libraries contain a _NULL_IMPORT_DESCRIPTOR symbol.
  # NOTE(review): restored the closing quote of the $GREP pattern, which
  # the mechanical translation had dropped before the closing paren.
  setvar func_cygming_ms_implib_tmp = $($NM $func_to_tool_file_result | eval $global_symbol_pipe | $GREP '_NULL_IMPORT_DESCRIPTOR')
  # Zero (TRUE) exit status iff the marker symbol was found.
  test -n $func_cygming_ms_implib_tmp
}

# func_cygming_dll_for_implib_fallback ARG
# Platform-specific function to extract the
# name of the DLL associated with the specified
# import library ARG.
#
# This fallback implementation is for use when $DLLTOOL
# does not support the --identify-strict option.
# Invoked by eval'ing the libtool variable
#    $sharedlib_from_linklib_cmd
# Result is available in the variable
#    $sharedlib_from_linklib_result
proc func_cygming_dll_for_implib_fallback {
  $opt_debug
  # Probe the implib style, then extract the DLL name from the section that
  # style stores it in: .idata$7 for binutils implibs, .idata$6 for MS ones.
  if func_cygming_gnu_implib_p $1  {
    # binutils import library
    setvar sharedlib_from_linklib_result = $(func_cygming_dll_for_implib_fallback_core '.idata$7' $1)
  } elif func_cygming_ms_implib_p $1  {
    # ms-generated import library
    setvar sharedlib_from_linklib_result = $(func_cygming_dll_for_implib_fallback_core '.idata$6' $1)
  } else {
    # unknown
    setvar sharedlib_from_linklib_result = """"
  }
}


# func_extract_an_archive dir oldlib
proc func_extract_an_archive {
    $opt_debug
    # $1 = destination dir, $2 = archive to extract ($1 is shifted away).
    setvar f_ex_an_ar_dir = "$1"; shift
    setvar f_ex_an_ar_oldlib = "$1"
    if test $lock_old_archive_extraction = yes {
      # Serialize concurrent extractions with a hard-link lock file;
      # spin until the link succeeds (i.e. the lock is free).
      setvar lockfile = "$f_ex_an_ar_oldlib.lock"
      while ! $opt_dry_run || ln $progpath $lockfile  {
	func_echo "Waiting for $lockfile to be removed"
	sleep 2
      }
    }
    # Extract in a subshell so the cwd change doesn't leak; drop the lock
    # and propagate $AR's status on exit.
    func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \
		   'stat=$?; rm -f "$lockfile"; exit $stat'
    if test $lock_old_archive_extraction = yes {
      $opt_dry_run || rm -f $lockfile
    }
    # sort -uc fails if the member list has duplicates, which 'ar x'
    # would have silently collapsed into one file.
    if shell {$AR t $f_ex_an_ar_oldlib | sort | sort -uc >/dev/null 2>&1} {
     :
    } else {
      func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib"
    }
}


# func_extract_archives gentop oldlib ...
proc func_extract_archives {
    $opt_debug
    # $1 = scratch dir; remaining args = static archives to unpack.
    setvar my_gentop = "$1"; shift
    setvar my_oldlibs = ${1+"$@"}
    setvar my_oldobjs = """"
    setvar my_xlib = """"
    setvar my_xabs = """"
    setvar my_xdir = """"

    for my_xlib in $my_oldlibs {
      # Extract the objects.
      case (my_xlib) {
	[\\/]* | [A-Za-z]:[\\/]* { setvar my_xabs = "$my_xlib" }
	* { setvar my_xabs = "$(pwd)"/$my_xlib"" }
      }
      func_basename $my_xlib
      setvar my_xlib = "$func_basename_result"
      setvar my_xlib_u = "$my_xlib"
      # Pick a unique subdir name when the same basename was seen before.
      while : {
        case{
	*" $my_xlib_u "* {
	  func_arith $extracted_serial + 1
	  setvar extracted_serial = "$func_arith_result"
	  setvar my_xlib_u = "lt$extracted_serial-$my_xlib" }
	* { break }
	}
      }
      setvar extracted_archives = ""$extracted_archives $my_xlib_u""
      setvar my_xdir = ""$my_gentop/$my_xlib_u""

      func_mkdir_p $my_xdir

      case (host) {
      *-darwin* {
	func_verbose "Extracting $my_xabs"
	# Do not bother doing anything if just a dry run
	$opt_dry_run || do {
	  setvar darwin_orig_dir = $(pwd)
	  cd $my_xdir || exit $?
	  setvar darwin_archive = "$my_xabs"
	  setvar darwin_curdir = $(pwd)
	  setvar darwin_base_archive = $(basename $darwin_archive)
	  # lipo reports the slices of a fat (multi-arch) archive.
	  setvar darwin_arches = $($LIPO -info $darwin_archive 2>/dev/null | $GREP Architectures 2>/dev/null || true)
	  if test -n $darwin_arches {
	    # NOTE(review): restored the closing quote of the sed script,
	    # which the mechanical translation had dropped before the paren.
	    setvar darwin_arches = $($ECHO $darwin_arches | $SED -e 's/.*are://')
	    setvar darwin_arch = ''
	    func_verbose "$darwin_base_archive has multiple architectures $darwin_arches"
	    # Split the fat archive into per-arch thin archives and extract each.
	    for darwin_arch in  $darwin_arches  {
	      func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}"
	      $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" ${darwin_archive}
	      cd "unfat-$$/${darwin_base_archive}-${darwin_arch}"
	      func_extract_an_archive $(pwd) ${darwin_base_archive}
	      cd $darwin_curdir
	      $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}"
	    } # $darwin_arches
            ## Okay now we've a bunch of thin objects, gotta fatten them up :)
	    setvar darwin_filelist = $(find unfat-$$ -type f -name '*'.o -print -o -name '*'.lo -print | $SED -e $basename | sort -u)
	    setvar darwin_file = ''
	    setvar darwin_files = ''
	    for darwin_file in $darwin_filelist {
	      setvar darwin_files = $(find unfat-$$ -name $darwin_file -print | sort | $NL2SP)
	      $LIPO -create -output $darwin_file $darwin_files
	    } # $darwin_filelist
	    $RM -rf unfat-$$
	    cd $darwin_orig_dir
	  } else {
	    cd $darwin_orig_dir
	    func_extract_an_archive $my_xdir $my_xabs
	  } # $darwin_arches
	} # !$opt_dry_run
	}
      * {
        func_extract_an_archive $my_xdir $my_xabs
	}
      }
      setvar my_oldobjs = ""$my_oldobjs "$(find $my_xdir -name '*'.$objext -print -o -name '*'.lo -print | sort | $NL2SP)"
    }

    # Space-separated list of every extracted object, for the caller.
    setvar func_extract_archives_result = "$my_oldobjs"
}


# func_emit_wrapper [arg=no]
#
# Emit a libtool wrapper script on stdout.
# Don't directly open a file because we may want to
# incorporate the script contents within a cygwin/mingw
# wrapper executable.  Must ONLY be called from within
# func_mode_link because it depends on a number of variables
# set therein.
#
# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR
# variable will take.  If 'yes', then the emitted script
# will assume that the directory in which it is stored is
# the $objdir directory.  This is a cygwin/mingw-specific
# behavior.
proc func_emit_wrapper {
	setvar func_emit_wrapper_arg1 = ${1-no}
	# Everything below is written to stdout; the caller chooses where
	# the wrapper-script text ultimately lands.

	$ECHO "\
#! $SHELL

# $output - temporary wrapper script for $objdir/$outputname
# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
#
# The $output program cannot be directly executed until all the libtool
# libraries that it depends on are installed.
#
# This wrapper script should never be moved out of the build directory.
# If it is, it will not operate correctly.

# Sed substitution that helps us do robust quoting.  It backslashifies
# metacharacters that are still active within double-quoted strings.
sed_quote_subst='$sed_quote_subst'

# Be Bourne compatible
if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then
  emulate sh
  NULLCMD=:
  # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which
  # is contrary to our usage.  Disable this feature.
  alias -g '\${1+\"\$@\"}'='\"\$@\"'
  setopt NO_GLOB_SUBST
else
  case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac
fi
BIN_SH=xpg4; export BIN_SH # for Tru64
DUALCASE=1; export DUALCASE # for MKS sh

# The HP-UX ksh and POSIX shell print the target directory to stdout
# if CDPATH is set.
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH

relink_command=\"$relink_command\"

# This environment variable determines our operation mode.
if test \"\$libtool_install_magic\" = \"$magic\"; then
  # install mode needs the following variables:
  generated_by_libtool_version='$macro_version'
  notinst_deplibs='$notinst_deplibs'
else
  # When we are sourced in execute mode, \$file and \$ECHO are already set.
  if test \"\$libtool_execute_magic\" != \"$magic\"; then
    file=\"\$0\""

    # Quote $ECHO so it can be re-assigned safely inside the emitted script.
    setvar qECHO = $($ECHO $ECHO | $SED $sed_quote_subst)
    $ECHO "\

# A function that is used when there is no print builtin or printf.
func_fallback_echo ()
{
  eval 'cat <<_LTECHO_EOF
\$1
_LTECHO_EOF'
}
    ECHO=\"$qECHO\"
  fi

# Very basic option parsing. These options are (a) specific to
# the libtool wrapper, (b) are identical between the wrapper
# /script/ and the wrapper /executable/ which is used only on
# windows platforms, and (c) all begin with the string "--lt-"
# (application programs are unlikely to have options which match
# this pattern).
#
# There are only two supported options: --lt-debug and
# --lt-dump-script. There is, deliberately, no --lt-help.
#
# The first argument to this parsing function should be the
# script's $0 value, followed by "$[join(ARGV)]".
lt_option_debug=
func_parse_lt_options ()
{
  lt_script_arg0=\$0
  shift
  for lt_opt
  do
    case \"\$lt_opt\" in
    --lt-debug) lt_option_debug=1 ;;
    --lt-dump-script)
        lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\`
        test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=.
        lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\`
        cat \"\$lt_dump_D/\$lt_dump_F\"
        exit 0
      ;;
    --lt-*)
        \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2
        exit 1
      ;;
    esac
  done

  # Print the debug banner immediately:
  if test -n \"\$lt_option_debug\"; then
    echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2
  fi
}

# Used when --lt-debug. Prints its arguments to stdout
# (redirection is the responsibility of the caller)
func_lt_dump_args ()
{
  lt_dump_args_N=1;
  for lt_arg
  do
    \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\"
    lt_dump_args_N=\`expr \$lt_dump_args_N + 1\`
  done
}

# Core function for launching the target application
func_exec_program_core ()
{
"
  # Emit the platform-appropriate exec line for the generated script.
  case (host) {
  # Backslashes separate directories on plain windows
  *-*-mingw | *-*-os2* | *-cegcc* {
    $ECHO "\
      if test -n \"\$lt_option_debug\"; then
        \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2
        func_lt_dump_args \${1+\"\$@\"} 1>&2
      fi
      exec \"\$progdir\\\\\$program\" \${1+\"\$@\"}
"
    }

  * {
    $ECHO "\
      if test -n \"\$lt_option_debug\"; then
        \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2
        func_lt_dump_args \${1+\"\$@\"} 1>&2
      fi
      exec \"\$progdir/\$program\" \${1+\"\$@\"}
"
    }
  }
  $ECHO "\
      \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2
      exit 1
}

# A function to encapsulate launching the target application
# Strips options in the --lt-* namespace from \$@ and
# launches target application with the remaining arguments.
func_exec_program ()
{
  case \" \$* \" in
  *\\ --lt-*)
    for lt_wr_arg
    do
      case \$lt_wr_arg in
      --lt-*) ;;
      *) set x \"\$@\" \"\$lt_wr_arg\"; shift;;
      esac
      shift
    done ;;
  esac
  func_exec_program_core \${1+\"\$@\"}
}

  # Parse options
  func_parse_lt_options \"\$0\" \${1+\"\$@\"}

  # Find the directory that this script lives in.
  thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\`
  test \"x\$thisdir\" = \"x\$file\" && thisdir=.

  # Follow symbolic links until we get to the real thisdir.
  file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\`
  while test -n \"\$file\"; do
    destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\`

    # If there was a directory component, then change thisdir.
    if test \"x\$destdir\" != \"x\$file\"; then
      case \"\$destdir\" in
      [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
      *) thisdir=\"\$thisdir/\$destdir\" ;;
      esac
    fi

    file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\`
    file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\`
  done

  # Usually 'no', except on cygwin/mingw when embedded into
  # the cwrapper.
  WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1
  if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then
    # special case for '.'
    if test \"\$thisdir\" = \".\"; then
      thisdir=\`pwd\`
    fi
    # remove .libs from thisdir
    case \"\$thisdir\" in
    *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;;
    $objdir )   thisdir=. ;;
    esac
  fi

  # Try to get the absolute directory name.
  absdir=\`cd \"\$thisdir\" && pwd\`
  test -n \"\$absdir\" && thisdir=\"\$absdir\"
"

	# With fast_install, the emitted script runs lt-$outputname from
	# $objdir and may relink it first via $relink_command.
	if test $fast_install = yes {
	  $ECHO "\
  program=lt-'$outputname'$exeext
  progdir=\"\$thisdir/$objdir\"

  if test ! -f \"\$progdir/\$program\" ||
     { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\
       test \"X\$file\" != \"X\$progdir/\$program\"; }; then

    file=\"\$\$-\$program\"

    if test ! -d \"\$progdir\"; then
      $MKDIR \"\$progdir\"
    else
      $RM \"\$progdir/\$file\"
    fi"

	  $ECHO "\

    # relink executable if necessary
    if test -n \"\$relink_command\"; then
      if relink_command_output=\`eval \$relink_command 2>&1\`; then :
      else
	$ECHO \"\$relink_command_output\" >&2
	$RM \"\$progdir/\$file\"
	exit 1
      fi
    fi

    $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
    { $RM \"\$progdir/\$program\";
      $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; }
    $RM \"\$progdir/\$file\"
  fi"
	} else {
	  $ECHO "\
  program='$outputname'
  progdir=\"\$thisdir/$objdir\"
"
	}

	$ECHO "\

  if test -f \"\$progdir/\$program\"; then"

	# fixup the dll searchpath if we need to.
	#
	# Fix the DLL searchpath if we need to.  Do this before prepending
	# to shlibpath, because on Windows, both are PATH and uninstalled
	# libraries must come first.
	if test -n $dllsearchpath {
	  $ECHO "\
    # Add the dll search path components to the executable PATH
    PATH=$dllsearchpath:\$PATH
"
	}

	# Export our shlibpath_var if we have one.
	if test $shlibpath_overrides_runpath = yes && test -n $shlibpath_var && test -n $temp_rpath {
	  $ECHO "\
    # Add our own library path to $shlibpath_var
    $shlibpath_var=\"$temp_rpath\$$shlibpath_var\"

    # Some systems cannot cope with colon-terminated $shlibpath_var
    # The second colon is a workaround for a bug in BeOS R4 sed
    $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\`

    export $shlibpath_var
"
	}

	# Tail of the emitted script: run the program, or fail loudly
	# if it has not been built yet.
	$ECHO "\
    if test \"\$libtool_execute_magic\" != \"$magic\"; then
      # Run the actual program with our arguments.
      func_exec_program \${1+\"\$@\"}
    fi
  else
    # The program doesn't exist.
    \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2
    \$ECHO \"This script is just a wrapper for \$program.\" 1>&2
    \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2
    exit 1
  fi
fi\
"
}


# func_emit_cwrapperexe_src
# emit the source code for a wrapper executable on stdout
# Must ONLY be called from within func_mode_link because
# it depends on a number of variable set therein.
proc func_emit_cwrapperexe_src {
	cat <<< """

/* $cwrappersource - temporary wrapper executable for $objdir/$outputname
   Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION

   The $output program cannot be directly executed until all the libtool
   libraries that it depends on are installed.

   This wrapper executable should never be moved out of the build directory.
   If it is, it will not operate correctly.
*/
"""
	    cat <<< '''
#ifdef _MSC_VER
# define _CRT_SECURE_NO_DEPRECATE 1
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef _MSC_VER
# include <direct.h>
# include <process.h>
# include <io.h>
#else
# include <unistd.h>
# include <stdint.h>
# ifdef __CYGWIN__
#  include <io.h>
# endif
#endif
#include <malloc.h>
#include <stdarg.h>
#include <assert.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>

/* declarations of non-ANSI functions */
#if defined(__MINGW32__)
# ifdef __STRICT_ANSI__
int _putenv (const char *);
# endif
#elif defined(__CYGWIN__)
# ifdef __STRICT_ANSI__
char *realpath (const char *, char *);
int putenv (char *);
int setenv (const char *, const char *, int);
# endif
/* #elif defined (other platforms) ... */
#endif

/* portability defines, excluding path handling macros */
#if defined(_MSC_VER)
# define setmode _setmode
# define stat    _stat
# define chmod   _chmod
# define getcwd  _getcwd
# define putenv  _putenv
# define S_IXUSR _S_IEXEC
# ifndef _INTPTR_T_DEFINED
#  define _INTPTR_T_DEFINED
#  define intptr_t int
# endif
#elif defined(__MINGW32__)
# define setmode _setmode
# define stat    _stat
# define chmod   _chmod
# define getcwd  _getcwd
# define putenv  _putenv
#elif defined(__CYGWIN__)
# define HAVE_SETENV
# define FOPEN_WB "wb"
/* #elif defined (other platforms) ... */
#endif

#if defined(PATH_MAX)
# define LT_PATHMAX PATH_MAX
#elif defined(MAXPATHLEN)
# define LT_PATHMAX MAXPATHLEN
#else
# define LT_PATHMAX 1024
#endif

#ifndef S_IXOTH
# define S_IXOTH 0
#endif
#ifndef S_IXGRP
# define S_IXGRP 0
#endif

/* path handling portability macros */
#ifndef DIR_SEPARATOR
# define DIR_SEPARATOR '/'
# define PATH_SEPARATOR ':'
#endif

#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \
  defined (__OS2__)
# define HAVE_DOS_BASED_FILE_SYSTEM
# define FOPEN_WB "wb"
# ifndef DIR_SEPARATOR_2
#  define DIR_SEPARATOR_2 '\\'
# endif
# ifndef PATH_SEPARATOR_2
#  define PATH_SEPARATOR_2 ';'
# endif
#endif

#ifndef DIR_SEPARATOR_2
# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
#else /* DIR_SEPARATOR_2 */
# define IS_DIR_SEPARATOR(ch) \
	(((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
#endif /* DIR_SEPARATOR_2 */

#ifndef PATH_SEPARATOR_2
# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR)
#else /* PATH_SEPARATOR_2 */
# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2)
#endif /* PATH_SEPARATOR_2 */

#ifndef FOPEN_WB
# define FOPEN_WB "w"
#endif
#ifndef _O_BINARY
# define _O_BINARY 0
#endif

#define XMALLOC(type, num)      ((type *) xmalloc ((num) * sizeof(type)))
#define XFREE(stale) do { \
  if (stale) { free ((void *) stale); stale = 0; } \
} while (0)

#if defined(LT_DEBUGWRAPPER)
static int lt_debug = 1;
#else
static int lt_debug = 0;
#endif

const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */

void *xmalloc (size_t num);
char *xstrdup (const char *string);
const char *base_name (const char *name);
char *find_executable (const char *wrapper);
char *chase_symlinks (const char *pathspec);
int make_executable (const char *path);
int check_executable (const char *path);
char *strendzap (char *str, const char *pat);
void lt_debugprintf (const char *file, int line, const char *fmt, ...);
void lt_fatal (const char *file, int line, const char *message, ...);
static const char *nonnull (const char *s);
static const char *nonempty (const char *s);
void lt_setenv (const char *name, const char *value);
char *lt_extend_str (const char *orig_value, const char *add, int to_end);
void lt_update_exe_path (const char *name, const char *value);
void lt_update_lib_path (const char *name, const char *value);
char **prepare_spawn (char **argv);
void lt_dump_script (FILE *f);
'''

	    cat <<< """
volatile const char * MAGIC_EXE = "$magic_exe";
const char * LIB_PATH_VARNAME = "$shlibpath_var";
"""

	    if test $shlibpath_overrides_runpath = yes && test -n $shlibpath_var && test -n $temp_rpath {
              func_to_host_path $temp_rpath
	      cat <<< """
const char * LIB_PATH_VALUE   = "$func_to_host_path_result";
"""
	    } else {
	      cat <<< '''
const char * LIB_PATH_VALUE   = "";
'''
	    }

	    if test -n $dllsearchpath {
              func_to_host_path "$dllsearchpath:"
	      cat <<< """
const char * EXE_PATH_VARNAME = "PATH";
const char * EXE_PATH_VALUE   = "$func_to_host_path_result";
"""
	    } else {
	      cat <<< '''
const char * EXE_PATH_VARNAME = "";
const char * EXE_PATH_VALUE   = "";
'''
	    }

	    if test $fast_install = yes {
	      cat <<< """
const char * TARGET_PROGRAM_NAME = "lt-$outputname"; /* hopefully, no .exe */
"""
	    } else {
	      cat <<< """
const char * TARGET_PROGRAM_NAME = "$outputname"; /* hopefully, no .exe */
"""
	    }


	    cat <<< '''

#define LTWRAPPER_OPTION_PREFIX         "--lt-"

static const char *ltwrapper_option_prefix = LTWRAPPER_OPTION_PREFIX;
static const char *dumpscript_opt       = LTWRAPPER_OPTION_PREFIX "dump-script";
static const char *debug_opt            = LTWRAPPER_OPTION_PREFIX "debug";

int
main (int argc, char *argv[])
{
  char **newargz;
  int  newargc;
  char *tmp_pathspec;
  char *actual_cwrapper_path;
  char *actual_cwrapper_name;
  char *target_name;
  char *lt_argv_zero;
  intptr_t rval = 127;

  int i;

  program_name = (char *) xstrdup (base_name (argv[0]));
  newargz = XMALLOC (char *, argc + 1);

  /* very simple arg parsing; don't want to rely on getopt
   * also, copy all non cwrapper options to newargz, except
   * argz[0], which is handled differently
   */
  newargc=0;
  for (i = 1; i < argc; i++)
    {
      if (strcmp (argv[i], dumpscript_opt) == 0)
	{
'''
	    case (host) {
	      *mingw* | *cygwin*  {
		# make stdout use "unix" line endings
		echo "          setmode(1,_O_BINARY);"
		}
	      }

	    cat <<< '''
	  lt_dump_script (stdout);
	  return 0;
	}
      if (strcmp (argv[i], debug_opt) == 0)
	{
          lt_debug = 1;
          continue;
	}
      if (strcmp (argv[i], ltwrapper_option_prefix) == 0)
        {
          /* however, if there is an option in the LTWRAPPER_OPTION_PREFIX
             namespace, but it is not one of the ones we know about and
             have already dealt with, above (inluding dump-script), then
             report an error. Otherwise, targets might begin to believe
             they are allowed to use options in the LTWRAPPER_OPTION_PREFIX
             namespace. The first time any user complains about this, we'll
             need to make LTWRAPPER_OPTION_PREFIX a configure-time option
             or a configure.ac-settable value.
           */
          lt_fatal (__FILE__, __LINE__,
		    "unrecognized %s option: '%s'",
                    ltwrapper_option_prefix, argv[i]);
        }
      /* otherwise ... */
      newargz[++newargc] = xstrdup (argv[i]);
    }
  newargz[++newargc] = NULL;

'''
	    cat <<< """
  /* The GNU banner must be the first non-error debug message */
  lt_debugprintf (__FILE__, __LINE__, "libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\n");
"""
	    cat <<< '''
  lt_debugprintf (__FILE__, __LINE__, "(main) argv[0]: %s\n", argv[0]);
  lt_debugprintf (__FILE__, __LINE__, "(main) program_name: %s\n", program_name);

  tmp_pathspec = find_executable (argv[0]);
  if (tmp_pathspec == NULL)
    lt_fatal (__FILE__, __LINE__, "couldn't find %s", argv[0]);
  lt_debugprintf (__FILE__, __LINE__,
                  "(main) found exe (before symlink chase) at: %s\n",
		  tmp_pathspec);

  actual_cwrapper_path = chase_symlinks (tmp_pathspec);
  lt_debugprintf (__FILE__, __LINE__,
                  "(main) found exe (after symlink chase) at: %s\n",
		  actual_cwrapper_path);
  XFREE (tmp_pathspec);

  actual_cwrapper_name = xstrdup (base_name (actual_cwrapper_path));
  strendzap (actual_cwrapper_path, actual_cwrapper_name);

  /* wrapper name transforms */
  strendzap (actual_cwrapper_name, ".exe");
  tmp_pathspec = lt_extend_str (actual_cwrapper_name, ".exe", 1);
  XFREE (actual_cwrapper_name);
  actual_cwrapper_name = tmp_pathspec;
  tmp_pathspec = 0;

  /* target_name transforms -- use actual target program name; might have lt- prefix */
  target_name = xstrdup (base_name (TARGET_PROGRAM_NAME));
  strendzap (target_name, ".exe");
  tmp_pathspec = lt_extend_str (target_name, ".exe", 1);
  XFREE (target_name);
  target_name = tmp_pathspec;
  tmp_pathspec = 0;

  lt_debugprintf (__FILE__, __LINE__,
		  "(main) libtool target name: %s\n",
		  target_name);
'''

	    cat <<< """
  newargz[0] =
    XMALLOC (char, (strlen (actual_cwrapper_path) +
		    strlen ("$objdir") + 1 + strlen (actual_cwrapper_name) + 1));
  strcpy (newargz[0], actual_cwrapper_path);
  strcat (newargz[0], "$objdir");
  strcat (newargz[0], "/");
"""

	    cat <<< '''
  /* stop here, and copy so we don't have to do this twice */
  tmp_pathspec = xstrdup (newargz[0]);

  /* do NOT want the lt- prefix here, so use actual_cwrapper_name */
  strcat (newargz[0], actual_cwrapper_name);

  /* DO want the lt- prefix here if it exists, so use target_name */
  lt_argv_zero = lt_extend_str (tmp_pathspec, target_name, 1);
  XFREE (tmp_pathspec);
  tmp_pathspec = NULL;
'''

	    case (host_os) {
	      mingw* {
	    cat <<< '''
  {
    char* p;
    while ((p = strchr (newargz[0], '\\')) != NULL)
      {
	*p = '/';
      }
    while ((p = strchr (lt_argv_zero, '\\')) != NULL)
      {
	*p = '/';
      }
  }
'''
	    }
	    }

	    cat <<< '''
  XFREE (target_name);
  XFREE (actual_cwrapper_path);
  XFREE (actual_cwrapper_name);

  lt_setenv ("BIN_SH", "xpg4"); /* for Tru64 */
  lt_setenv ("DUALCASE", "1");  /* for MSK sh */
  /* Update the DLL searchpath.  EXE_PATH_VALUE ($dllsearchpath) must
     be prepended before (that is, appear after) LIB_PATH_VALUE ($temp_rpath)
     because on Windows, both *_VARNAMEs are PATH but uninstalled
     libraries must come first. */
  lt_update_exe_path (EXE_PATH_VARNAME, EXE_PATH_VALUE);
  lt_update_lib_path (LIB_PATH_VARNAME, LIB_PATH_VALUE);

  lt_debugprintf (__FILE__, __LINE__, "(main) lt_argv_zero: %s\n",
		  nonnull (lt_argv_zero));
  for (i = 0; i < newargc; i++)
    {
      lt_debugprintf (__FILE__, __LINE__, "(main) newargz[%d]: %s\n",
		      i, nonnull (newargz[i]));
    }

'''

	    case (host_os) {
	      mingw* {
		cat <<< '''
  /* execv doesn't actually work on mingw as expected on unix */
  newargz = prepare_spawn (newargz);
  rval = _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz);
  if (rval == -1)
    {
      /* failed to start process */
      lt_debugprintf (__FILE__, __LINE__,
		      "(main) failed to launch target \"%s\": %s\n",
		      lt_argv_zero, nonnull (strerror (errno)));
      return 127;
    }
  return rval;
'''
		}
	      * {
		cat <<< '''
  execv (lt_argv_zero, newargz);
  return rval; /* =127, but avoids unused variable warning */
'''
		}
	    }

	    cat <<< '''
}

void *
xmalloc (size_t num)
{
  void *p = (void *) malloc (num);
  if (!p)
    lt_fatal (__FILE__, __LINE__, "memory exhausted");

  return p;
}

char *
xstrdup (const char *string)
{
  return string ? strcpy ((char *) xmalloc (strlen (string) + 1),
			  string) : NULL;
}

const char *
base_name (const char *name)
{
  const char *base;

#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
  /* Skip over the disk name in MSDOS pathnames. */
  if (isalpha ((unsigned char) name[0]) && name[1] == ':')
    name += 2;
#endif

  for (base = name; *name; name++)
    if (IS_DIR_SEPARATOR (*name))
      base = name + 1;
  return base;
}

int
check_executable (const char *path)
{
  struct stat st;

  lt_debugprintf (__FILE__, __LINE__, "(check_executable): %s\n",
                  nonempty (path));
  if ((!path) || (!*path))
    return 0;

  if ((stat (path, &st) >= 0)
      && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)))
    return 1;
  else
    return 0;
}

int
make_executable (const char *path)
{
  int rval = 0;
  struct stat st;

  lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n",
                  nonempty (path));
  if ((!path) || (!*path))
    return 0;

  if (stat (path, &st) >= 0)
    {
      rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR);
    }
  return rval;
}

/* Searches for the full path of the wrapper.  Returns
   newly allocated full path name if found, NULL otherwise
   Does not chase symlinks, even on platforms that support them.
*/
char *
find_executable (const char *wrapper)
{
  int has_slash = 0;
  const char *p;
  const char *p_next;
  /* static buffer for getcwd */
  char tmp[LT_PATHMAX + 1];
  int tmp_len;
  char *concat_name;

  lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n",
                  nonempty (wrapper));

  if ((wrapper == NULL) || (*wrapper == '\0'))
    return NULL;

  /* Absolute path? */
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
  if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':')
    {
      concat_name = xstrdup (wrapper);
      if (check_executable (concat_name))
	return concat_name;
      XFREE (concat_name);
    }
  else
    {
#endif
      if (IS_DIR_SEPARATOR (wrapper[0]))
	{
	  concat_name = xstrdup (wrapper);
	  if (check_executable (concat_name))
	    return concat_name;
	  XFREE (concat_name);
	}
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
    }
#endif

  for (p = wrapper; *p; p++)
    if (*p == '/')
      {
	has_slash = 1;
	break;
      }
  if (!has_slash)
    {
      /* no slashes; search PATH */
      const char *path = getenv ("PATH");
      if (path != NULL)
	{
	  for (p = path; *p; p = p_next)
	    {
	      const char *q;
	      size_t p_len;
	      for (q = p; *q; q++)
		if (IS_PATH_SEPARATOR (*q))
		  break;
	      p_len = q - p;
	      p_next = (*q == '\0' ? q : q + 1);
	      if (p_len == 0)
		{
		  /* empty path: current directory */
		  if (getcwd (tmp, LT_PATHMAX) == NULL)
		    lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
                              nonnull (strerror (errno)));
		  tmp_len = strlen (tmp);
		  concat_name =
		    XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
		  memcpy (concat_name, tmp, tmp_len);
		  concat_name[tmp_len] = '/';
		  strcpy (concat_name + tmp_len + 1, wrapper);
		}
	      else
		{
		  concat_name =
		    XMALLOC (char, p_len + 1 + strlen (wrapper) + 1);
		  memcpy (concat_name, p, p_len);
		  concat_name[p_len] = '/';
		  strcpy (concat_name + p_len + 1, wrapper);
		}
	      if (check_executable (concat_name))
		return concat_name;
	      XFREE (concat_name);
	    }
	}
      /* not found in PATH; assume curdir */
    }
  /* Relative path | not found in path: prepend cwd */
  if (getcwd (tmp, LT_PATHMAX) == NULL)
    lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
              nonnull (strerror (errno)));
  tmp_len = strlen (tmp);
  concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
  memcpy (concat_name, tmp, tmp_len);
  concat_name[tmp_len] = '/';
  strcpy (concat_name + tmp_len + 1, wrapper);

  if (check_executable (concat_name))
    return concat_name;
  XFREE (concat_name);
  return NULL;
}

char *
chase_symlinks (const char *pathspec)
{
#ifndef S_ISLNK
  return xstrdup (pathspec);
#else
  char buf[LT_PATHMAX];
  struct stat s;
  char *tmp_pathspec = xstrdup (pathspec);
  char *p;
  int has_symlinks = 0;
  while (strlen (tmp_pathspec) && !has_symlinks)
    {
      lt_debugprintf (__FILE__, __LINE__,
		      "checking path component for symlinks: %s\n",
		      tmp_pathspec);
      if (lstat (tmp_pathspec, &s) == 0)
	{
	  if (S_ISLNK (s.st_mode) != 0)
	    {
	      has_symlinks = 1;
	      break;
	    }

	  /* search backwards for last DIR_SEPARATOR */
	  p = tmp_pathspec + strlen (tmp_pathspec) - 1;
	  while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
	    p--;
	  if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
	    {
	      /* no more DIR_SEPARATORS left */
	      break;
	    }
	  *p = '\0';
	}
      else
	{
	  lt_fatal (__FILE__, __LINE__,
		    "error accessing file \"%s\": %s",
		    tmp_pathspec, nonnull (strerror (errno)));
	}
    }
  XFREE (tmp_pathspec);

  if (!has_symlinks)
    {
      return xstrdup (pathspec);
    }

  tmp_pathspec = realpath (pathspec, buf);
  if (tmp_pathspec == 0)
    {
      lt_fatal (__FILE__, __LINE__,
		"could not follow symlinks for %s", pathspec);
    }
  return xstrdup (tmp_pathspec);
#endif
}

char *
strendzap (char *str, const char *pat)
{
  size_t len, patlen;

  assert (str != NULL);
  assert (pat != NULL);

  len = strlen (str);
  patlen = strlen (pat);

  if (patlen <= len)
    {
      str += len - patlen;
      if (strcmp (str, pat) == 0)
	*str = '\0';
    }
  return str;
}

void
lt_debugprintf (const char *file, int line, const char *fmt, ...)
{
  va_list args;
  if (lt_debug)
    {
      (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line);
      va_start (args, fmt);
      (void) vfprintf (stderr, fmt, args);
      va_end (args);
    }
}

static void
lt_error_core (int exit_status, const char *file,
	       int line, const char *mode,
	       const char *message, va_list ap)
{
  fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode);
  vfprintf (stderr, message, ap);
  fprintf (stderr, ".\n");

  if (exit_status >= 0)
    exit (exit_status);
}

void
lt_fatal (const char *file, int line, const char *message, ...)
{
  va_list ap;
  va_start (ap, message);
  lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap);
  va_end (ap);
}

static const char *
nonnull (const char *s)
{
  return s ? s : "(null)";
}

static const char *
nonempty (const char *s)
{
  return (s && !*s) ? "(empty)" : nonnull (s);
}

void
lt_setenv (const char *name, const char *value)
{
  lt_debugprintf (__FILE__, __LINE__,
		  "(lt_setenv) setting '%s' to '%s'\n",
                  nonnull (name), nonnull (value));
  {
#ifdef HAVE_SETENV
    /* always make a copy, for consistency with !HAVE_SETENV */
    char *str = xstrdup (value);
    setenv (name, str, 1);
#else
    int len = strlen (name) + 1 + strlen (value) + 1;
    char *str = XMALLOC (char, len);
    sprintf (str, "%s=%s", name, value);
    if (putenv (str) != EXIT_SUCCESS)
      {
        XFREE (str);
      }
#endif
  }
}

char *
lt_extend_str (const char *orig_value, const char *add, int to_end)
{
  char *new_value;
  if (orig_value && *orig_value)
    {
      int orig_value_len = strlen (orig_value);
      int add_len = strlen (add);
      new_value = XMALLOC (char, add_len + orig_value_len + 1);
      if (to_end)
        {
          strcpy (new_value, orig_value);
          strcpy (new_value + orig_value_len, add);
        }
      else
        {
          strcpy (new_value, add);
          strcpy (new_value + add_len, orig_value);
        }
    }
  else
    {
      new_value = xstrdup (add);
    }
  return new_value;
}

void
lt_update_exe_path (const char *name, const char *value)
{
  lt_debugprintf (__FILE__, __LINE__,
		  "(lt_update_exe_path) modifying '%s' by prepending '%s'\n",
                  nonnull (name), nonnull (value));

  if (name && *name && value && *value)
    {
      char *new_value = lt_extend_str (getenv (name), value, 0);
      /* some systems can't cope with a ':'-terminated path #' */
      int len = strlen (new_value);
      while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1]))
        {
          new_value[len-1] = '\0';
        }
      lt_setenv (name, new_value);
      XFREE (new_value);
    }
}

void
lt_update_lib_path (const char *name, const char *value)
{
  lt_debugprintf (__FILE__, __LINE__,
		  "(lt_update_lib_path) modifying '%s' by prepending '%s'\n",
                  nonnull (name), nonnull (value));

  if (name && *name && value && *value)
    {
      char *new_value = lt_extend_str (getenv (name), value, 0);
      lt_setenv (name, new_value);
      XFREE (new_value);
    }
}

'''
	    case (host_os) {
	      mingw* {
		cat <<< '''

/* Prepares an argument vector before calling spawn().
   Note that spawn() does not by itself call the command interpreter
     (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") :
      ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
         GetVersionEx(&v);
         v.dwPlatformId == VER_PLATFORM_WIN32_NT;
      }) ? "cmd.exe" : "command.com").
   Instead it simply concatenates the arguments, separated by ' ', and calls
   CreateProcess().  We must quote the arguments since Win32 CreateProcess()
   interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a
   special way:
   - Space and tab are interpreted as delimiters. They are not treated as
     delimiters if they are surrounded by double quotes: "...".
   - Unescaped double quotes are removed from the input. Their only effect is
     that within double quotes, space and tab are treated like normal
     characters.
   - Backslashes not followed by double quotes are not special.
   - But 2*n+1 backslashes followed by a double quote become
     n backslashes followed by a double quote (n >= 0):
       \" -> "
       \\\" -> \"
       \\\\\" -> \\"
 */
#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
char **
prepare_spawn (char **argv)
{
  size_t argc;
  char **new_argv;
  size_t i;

  /* Count number of arguments.  */
  for (argc = 0; argv[argc] != NULL; argc++)
    ;

  /* Allocate new argument vector.  */
  new_argv = XMALLOC (char *, argc + 1);

  /* Put quoted arguments into the new argument vector.  */
  for (i = 0; i < argc; i++)
    {
      const char *string = argv[i];

      if (string[0] == '\0')
	new_argv[i] = xstrdup ("\"\"");
      else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL)
	{
	  int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL);
	  size_t length;
	  unsigned int backslashes;
	  const char *s;
	  char *quoted_string;
	  char *p;

	  length = 0;
	  backslashes = 0;
	  if (quote_around)
	    length++;
	  for (s = string; *s != '\0'; s++)
	    {
	      char c = *s;
	      if (c == '"')
		length += backslashes + 1;
	      length++;
	      if (c == '\\')
		backslashes++;
	      else
		backslashes = 0;
	    }
	  if (quote_around)
	    length += backslashes + 1;

	  quoted_string = XMALLOC (char, length + 1);

	  p = quoted_string;
	  backslashes = 0;
	  if (quote_around)
	    *p++ = '"';
	  for (s = string; *s != '\0'; s++)
	    {
	      char c = *s;
	      if (c == '"')
		{
		  unsigned int j;
		  for (j = backslashes + 1; j > 0; j--)
		    *p++ = '\\';
		}
	      *p++ = c;
	      if (c == '\\')
		backslashes++;
	      else
		backslashes = 0;
	    }
	  if (quote_around)
	    {
	      unsigned int j;
	      for (j = backslashes; j > 0; j--)
		*p++ = '\\';
	      *p++ = '"';
	    }
	  *p = '\0';

	  new_argv[i] = quoted_string;
	}
      else
	new_argv[i] = (char *) string;
    }
  new_argv[argc] = NULL;

  return new_argv;
}
'''
		}
	    }

            cat <<< '''
void lt_dump_script (FILE* f)
{
'''
	    func_emit_wrapper yes |
	      $SED -n -e '
s/^\(.\{79\}\)\(..*\)/\1\
\2/
h
s/\([\\"]\)/\\\1/g
s/$/\\n/
s/\([^\n]*\).*/  fputs ("\1", f);/p
g
D'
            cat <<< '''
}
'''
}
# end: func_emit_cwrapperexe_src

# func_win32_import_lib_p ARG
# True if ARG is an import lib, as indicated by $file_magic_cmd
proc func_win32_import_lib_p {
    # Honor libtool's --debug mode: $opt_debug expands to a tracing
    # command (e.g. 'set -x') when debugging is enabled, else to nothing.
    $opt_debug
    # NOTE(review): this case has no subject word.  Upstream ltmain.sh
    # matches `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q`
    # here, so the command substitution appears to have been dropped by
    # the bash-to-Oil translator -- TODO confirm against the translator.
    case{
    # Magic output mentions "import": succeed (ARG is an import lib).
    *import* { : }
    # Anything else: return a failing status.
    * { false }
    }
}

# func_mode_link arg...
proc func_mode_link {
    $opt_debug
    case (host) {
    *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc* {
      # It is impossible to link a dll without this setting, and
      # we shouldn't force the makefile maintainer to figure out
      # which system we are compiling for in order to pass an extra
      # flag for every libtool invocation.
      # allow_undefined=no

      # FIXME: Unfortunately, there are problems with the above when trying
      # to make a dll which has undefined symbols, in which case not
      # even a static library is built.  For now, we need to specify
      # -no-undefined on the libtool link line when we can be certain
      # that all symbols are satisfied, otherwise we get a static library.
      setvar allow_undefined = 'yes'
      }
    * {
      setvar allow_undefined = 'yes'
      }
    }
    setvar libtool_args = "$nonopt"
    setvar base_compile = ""$nonopt $[join(ARGV)]""
    setvar compile_command = "$nonopt"
    setvar finalize_command = "$nonopt"

    setvar compile_rpath = ''
    setvar finalize_rpath = ''
    setvar compile_shlibpath = ''
    setvar finalize_shlibpath = ''
    setvar convenience = ''
    setvar old_convenience = ''
    setvar deplibs = ''
    setvar old_deplibs = ''
    setvar compiler_flags = ''
    setvar linker_flags = ''
    setvar dllsearchpath = ''
    setvar lib_search_path = $(pwd)
    setvar inst_prefix_dir = ''
    setvar new_inherited_linker_flags = ''

    setvar avoid_version = 'no'
    setvar bindir = ''
    setvar dlfiles = ''
    setvar dlprefiles = ''
    setvar dlself = 'no'
    setvar export_dynamic = 'no'
    setvar export_symbols = ''
    setvar export_symbols_regex = ''
    setvar generated = ''
    setvar libobjs = ''
    setvar ltlibs = ''
    setvar module = 'no'
    setvar no_install = 'no'
    setvar objs = ''
    setvar non_pic_objects = ''
    setvar precious_files_regex = ''
    setvar prefer_static_libs = 'no'
    setvar preload = 'no'
    setvar prev = ''
    setvar prevarg = ''
    setvar release = ''
    setvar rpath = ''
    setvar xrpath = ''
    setvar perm_rpath = ''
    setvar temp_rpath = ''
    setvar thread_safe = 'no'
    setvar vinfo = ''
    setvar vinfo_number = 'no'
    setvar weak_libs = ''
    setvar single_module = ""${wl}-single_module""
    func_infer_tag $base_compilefor arg in @ARGV {
      case (arg) {
      -shared {
	test $build_libtool_libs != yes && \
	  func_fatal_configuration "can not build a shared library"
	setvar build_old_libs = 'no'
	break
	}
      -all-static | -static | -static-libtool-libs {
	case (arg) {
	-all-static {
	  if test $build_libtool_libs = yes && test -z $link_static_flag {
	    func_warning "complete static linking is impossible in this configuration"
	  }
	  if test -n $link_static_flag {
	    setvar dlopen_self = "$dlopen_self_static"
	  }
	  setvar prefer_static_libs = 'yes'
	  }
	-static {
	  if test -z $pic_flag && test -n $link_static_flag {
	    setvar dlopen_self = "$dlopen_self_static"
	  }
	  setvar prefer_static_libs = 'built'
	  }
	-static-libtool-libs {
	  if test -z $pic_flag && test -n $link_static_flag {
	    setvar dlopen_self = "$dlopen_self_static"
	  }
	  setvar prefer_static_libs = 'yes'
	  }
	}
	setvar build_libtool_libs = 'no'
	setvar build_old_libs = 'yes'
	break
	}
      }
    }

    # See if our shared archives depend on static archives.
    test -n $old_archive_from_new_cmds && setvar build_old_libs = 'yes'

    # Go through the arguments, transforming them on the way.
    while test "$Argc" -gt 0 {
      setvar arg = "$1"
      shift
      func_quote_for_eval $arg
      setvar qarg = "$func_quote_for_eval_unquoted_result"
      func_append libtool_args " $func_quote_for_eval_result"

      # If the previous option needs an argument, assign it.
      if test -n $prev {
	case (prev) {
	output {
	  func_append compile_command " @OUTPUT@"
	  func_append finalize_command " @OUTPUT@"
	  }
	}

	case (prev) {
	bindir {
	  setvar bindir = "$arg"
	  setvar prev = ''
	  continue
	  }
	dlfiles|dlprefiles {
	  if test $preload = no {
	    # Add the symbol object into the linking commands.
	    func_append compile_command " @SYMFILE@"
	    func_append finalize_command " @SYMFILE@"
	    setvar preload = 'yes'
	  }
	  case (arg) {
	  *.la | *.lo { }  # We handle these cases below.
	  force {
	    if test $dlself = no {
	      setvar dlself = 'needless'
	      setvar export_dynamic = 'yes'
	    }
	    setvar prev = ''
	    continue
	    }
	  self {
	    if test $prev = dlprefiles {
	      setvar dlself = 'yes'
	    } elif test $prev = dlfiles && test $dlopen_self != yes {
	      setvar dlself = 'yes'
	    } else {
	      setvar dlself = 'needless'
	      setvar export_dynamic = 'yes'
	    }
	    setvar prev = ''
	    continue
	    }
	  * {
	    if test $prev = dlfiles {
	      func_append dlfiles " $arg"
	    } else {
	      func_append dlprefiles " $arg"
	    }
	    setvar prev = ''
	    continue
	    }
	  }
	  }
	expsyms {
	  setvar export_symbols = "$arg"
	  test -f $arg \
	    || func_fatal_error "symbol file \`$arg' does not exist"
	  setvar prev = ''
	  continue
	  }
	expsyms_regex {
	  setvar export_symbols_regex = "$arg"
	  setvar prev = ''
	  continue
	  }
	framework {
	  case (host) {
	    *-*-darwin* {
	      case{
		*" $qarg.ltframework "* { }
		* { func_append deplibs " $qarg.ltframework" # this is fixed later
		   }
	      }
	      }
	  }
	  setvar prev = ''
	  continue
	  }
	inst_prefix {
	  setvar inst_prefix_dir = "$arg"
	  setvar prev = ''
	  continue
	  }
	objectlist {
	  if test -f $arg {
	    setvar save_arg = "$arg"
	    setvar moreargs = ''
	    for fil in `cat "$save_arg"`
	    {
#	      func_append moreargs " $fil"
	      setvar arg = "$fil"
	      # A libtool-controlled object.

	      # Check to see that this really is a libtool object.
	      if func_lalib_unsafe_p $arg {
		setvar pic_object = ''
		setvar non_pic_object = ''

		# Read the .lo file
		func_source $arg

		if test -z $pic_object ||
		   test -z $non_pic_object ||
		   test $pic_object = none &&
		   test $non_pic_object = none {
		  func_fatal_error "cannot find name of object for \`$arg'"
		}

		# Extract subdirectory from the argument.
		func_dirname $arg "/" ""
		setvar xdir = "$func_dirname_result"

		if test $pic_object != none {
		  # Prepend the subdirectory the object is found in.
		  setvar pic_object = ""$xdir$pic_object""

		  if test $prev = dlfiles {
		    if test $build_libtool_libs = yes && test $dlopen_support = yes {
		      func_append dlfiles " $pic_object"
		      setvar prev = ''
		      continue
		    } else {
		      # If libtool objects are unsupported, then we need to preload.
		      setvar prev = 'dlprefiles'
		    }
		  }

		  # CHECK ME:  I think I busted this.  -Ossama
		  if test $prev = dlprefiles {
		    # Preload the old-style object.
		    func_append dlprefiles " $pic_object"
		    setvar prev = ''
		  }

		  # A PIC object.
		  func_append libobjs " $pic_object"
		  setvar arg = "$pic_object"
		}

		# Non-PIC object.
		if test $non_pic_object != none {
		  # Prepend the subdirectory the object is found in.
		  setvar non_pic_object = ""$xdir$non_pic_object""

		  # A standard non-PIC object
		  func_append non_pic_objects " $non_pic_object"
		  if test -z $pic_object || test $pic_object = none  {
		    setvar arg = "$non_pic_object"
		  }
		} else {
		  # If the PIC object exists, use it instead.
		  # $xdir was prepended to $pic_object above.
		  setvar non_pic_object = "$pic_object"
		  func_append non_pic_objects " $non_pic_object"
		}
	      } else {
		# Only an error if not doing a dry-run.
		if $opt_dry_run {
		  # Extract subdirectory from the argument.
		  func_dirname $arg "/" ""
		  setvar xdir = "$func_dirname_result"

		  func_lo2o $arg
		  setvar pic_object = "$xdir$objdir/$func_lo2o_result"
		  setvar non_pic_object = "$xdir$func_lo2o_result"
		  func_append libobjs " $pic_object"
		  func_append non_pic_objects " $non_pic_object"
	        } else {
		  func_fatal_error "\`$arg' is not a valid libtool object"
		}
	      }
	    }
	  } else {
	    func_fatal_error "link input file \`$arg' does not exist"
	  }
	  setvar arg = "$save_arg"
	  setvar prev = ''
	  continue
	  }
	precious_regex {
	  setvar precious_files_regex = "$arg"
	  setvar prev = ''
	  continue
	  }
	release {
	  setvar release = ""-$arg""
	  setvar prev = ''
	  continue
	  }
	rpath | xrpath {
	  # We need an absolute path.
	  case (arg) {
	  [\\/]* | [A-Za-z]:[\\/]* { }
	  * {
	    func_fatal_error "only absolute run-paths are allowed"
	    }
	  }
	  if test $prev = rpath {
	    case{
	    *" $arg "* { }
	    * { func_append rpath " $arg" }
	    }
	  } else {
	    case{
	    *" $arg "* { }
	    * { func_append xrpath " $arg" }
	    }
	  }
	  setvar prev = ''
	  continue
	  }
	shrext {
	  setvar shrext_cmds = "$arg"
	  setvar prev = ''
	  continue
	  }
	weak {
	  func_append weak_libs " $arg"
	  setvar prev = ''
	  continue
	  }
	xcclinker {
	  func_append linker_flags " $qarg"
	  func_append compiler_flags " $qarg"
	  setvar prev = ''
	  func_append compile_command " $qarg"
	  func_append finalize_command " $qarg"
	  continue
	  }
	xcompiler {
	  func_append compiler_flags " $qarg"
	  setvar prev = ''
	  func_append compile_command " $qarg"
	  func_append finalize_command " $qarg"
	  continue
	  }
	xlinker {
	  func_append linker_flags " $qarg"
	  func_append compiler_flags " $wl$qarg"
	  setvar prev = ''
	  func_append compile_command " $wl$qarg"
	  func_append finalize_command " $wl$qarg"
	  continue
	  }
	* {
	  eval "$prev=\"\$arg\""
	  setvar prev = ''
	  continue
	  }
	}
      } # test -n "$prev"

      setvar prevarg = "$arg"

      case (arg) {
      -all-static {
	if test -n $link_static_flag {
	  # See comment for -static flag below, for more details.
	  func_append compile_command " $link_static_flag"
	  func_append finalize_command " $link_static_flag"
	}
	continue
	}

      -allow-undefined {
	# FIXME: remove this flag sometime in the future.
	func_fatal_error "\`-allow-undefined' must not be used because it is the default"
	}

      -avoid-version {
	setvar avoid_version = 'yes'
	continue
	}

      -bindir {
	setvar prev = 'bindir'
	continue
	}

      -dlopen {
	setvar prev = 'dlfiles'
	continue
	}

      -dlpreopen {
	setvar prev = 'dlprefiles'
	continue
	}

      -export-dynamic {
	setvar export_dynamic = 'yes'
	continue
	}

      -export-symbols | -export-symbols-regex {
	if test -n $export_symbols || test -n $export_symbols_regex {
	  func_fatal_error "more than one -exported-symbols argument is not allowed"
	}
	if test "X$arg" = "X-export-symbols" {
	  setvar prev = 'expsyms'
	} else {
	  setvar prev = 'expsyms_regex'
	}
	continue
	}

      -framework {
	setvar prev = 'framework'
	continue
	}

      -inst-prefix-dir {
	setvar prev = 'inst_prefix'
	continue
	}

      # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
      # so, if we see these flags be careful not to treat them like -L
      -L[A-Z][A-Z]*:* {
	case (with_gcc) {
	no/*-*-irix* | /*-*-irix* {
	  func_append compile_command " $arg"
	  func_append finalize_command " $arg"
	  }
	}
	continue
	}

      -L* {
	func_stripname "-L" '' $arg
	if test -z $func_stripname_result {
	  if test "$Argc" -gt 0 {
	    func_fatal_error "require no space between \`-L' and \`$1'"
	  } else {
	    func_fatal_error "need path for \`-L' option"
	  }
	}
	func_resolve_sysroot $func_stripname_result
	setvar dir = "$func_resolve_sysroot_result"
	# We need an absolute path.
	case (dir) {
	[\\/]* | [A-Za-z]:[\\/]* { }
	* {
	  setvar absdir = $(cd $dir && pwd)
	  test -z $absdir && \
	    func_fatal_error "cannot determine absolute directory name of \`$dir'"
	  setvar dir = "$absdir"
	  }
	}
	case{
	*" -L$dir "* | *" $arg "* {
	  # Will only happen for absolute or sysroot arguments
	  }
	* {
	  # Preserve sysroot, but never include relative directories
	  case (dir) {
	    [\\/]* | [A-Za-z]:[\\/]* | =* { func_append deplibs " $arg" }
	    * { func_append deplibs " -L$dir" }
	  }
	  func_append lib_search_path " $dir"
	  }
	}
	case (host) {
	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc* {
	  setvar testbindir = $($ECHO $dir | $SED 's*/lib$*/bin*)
	  case{
	  *":$dir:"* { }
	  :: { setvar dllsearchpath = "$dir"}
	  * { func_append dllsearchpath ":$dir"}
	  }
	  case{
	  *":$testbindir:"* { }
	  :: { setvar dllsearchpath = "$testbindir"}
	  * { func_append dllsearchpath ":$testbindir"}
	  }
	  }
	}
	continue
	}

      -l* {
	if test "X$arg" = "X-lc" || test "X$arg" = "X-lm" {
	  case (host) {
	  *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku* {
	    # These systems don't actually have a C or math library (as such)
	    continue
	    }
	  *-*-os2* {
	    # These systems don't actually have a C library (as such)
	    test "X$arg" = "X-lc" && continue
	    }
	  *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* {
	    # Do not include libc due to us having libc/libc_r.
	    test "X$arg" = "X-lc" && continue
	    }
	  *-*-rhapsody* | *-*-darwin1.[012] {
	    # Rhapsody C and math libraries are in the System framework
	    func_append deplibs " System.ltframework"
	    continue
	    }
	  *-*-sco3.2v5* | *-*-sco5v6* {
	    # Causes problems with __ctype
	    test "X$arg" = "X-lc" && continue
	    }
	  *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX* {
	    # Compiler inserts libc in the correct place for threads to work
	    test "X$arg" = "X-lc" && continue
	    }
	  }
	} elif test "X$arg" = "X-lc_r" {
	 case (host) {
	 *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* {
	   # Do not include libc_r directly, use -pthread flag.
	   continue
	   }
	 }
	}
	func_append deplibs " $arg"
	continue
	}

      -module {
	setvar module = 'yes'
	continue
	}

      # Tru64 UNIX uses -model [arg] to determine the layout of C++
      # classes, name mangling, and exception handling.
      # Darwin uses the -arch flag to determine output architecture.
      -model|-arch|-isysroot|--sysroot {
	func_append compiler_flags " $arg"
	func_append compile_command " $arg"
	func_append finalize_command " $arg"
	setvar prev = 'xcompiler'
	continue
	}

      -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
      |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=* {
	func_append compiler_flags " $arg"
	func_append compile_command " $arg"
	func_append finalize_command " $arg"
	case{
	    *" $arg "* { }
	    *  { func_append new_inherited_linker_flags " $arg" }
	}
	continue
	}

      -multi_module {
	setvar single_module = ""${wl}-multi_module""
	continue
	}

      -no-fast-install {
	setvar fast_install = 'no'
	continue
	}

      -no-install {
	case (host) {
	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc* {
	  # The PATH hackery in wrapper scripts is required on Windows
	  # and Darwin in order for the loader to find any dlls it needs.
	  func_warning "\`-no-install' is ignored for $host"
	  func_warning "assuming \`-no-fast-install' instead"
	  setvar fast_install = 'no'
	  }
	* { setvar no_install = 'yes' }
	}
	continue
	}

      -no-undefined {
	setvar allow_undefined = 'no'
	continue
	}

      -objectlist {
	setvar prev = 'objectlist'
	continue
	}

      -o { setvar prev = 'output' }

      -precious-files-regex {
	setvar prev = 'precious_regex'
	continue
	}

      -release {
	setvar prev = 'release'
	continue
	}

      -rpath {
	setvar prev = 'rpath'
	continue
	}

      -R {
	setvar prev = 'xrpath'
	continue
	}

      -R* {
	func_stripname '-R' '' $arg
	setvar dir = "$func_stripname_result"
	# We need an absolute path.
	case (dir) {
	[\\/]* | [A-Za-z]:[\\/]* { }
	=* {
	  func_stripname '=' '' $dir
	  setvar dir = "$lt_sysroot$func_stripname_result"
	  }
	* {
	  func_fatal_error "only absolute run-paths are allowed"
	  }
	}
	case{
	*" $dir "* { }
	* { func_append xrpath " $dir" }
	}
	continue
	}

      -shared {
	# The effects of -shared are defined in a previous loop.
	continue
	}

      -shrext {
	setvar prev = 'shrext'
	continue
	}

      -static | -static-libtool-libs {
	# The effects of -static are defined in a previous loop.
	# We used to do the same as -all-static on platforms that
	# didn't have a PIC flag, but the assumption that the effects
	# would be equivalent was wrong.  It would break on at least
	# Digital Unix and AIX.
	continue
	}

      -thread-safe {
	setvar thread_safe = 'yes'
	continue
	}

      -version-info {
	setvar prev = 'vinfo'
	continue
	}

      -version-number {
	setvar prev = 'vinfo'
	setvar vinfo_number = 'yes'
	continue
	}

      -weak {
        setvar prev = 'weak'
	continue
	}

      -Wc,* {
	func_stripname '-Wc,' '' $arg
	setvar args = "$func_stripname_result"
	setvar arg = ''
	setvar save_ifs = "$IFS"; setvar IFS = '',''
	for flag in $args {
	  setvar IFS = "$save_ifs"
          func_quote_for_eval $flag
	  func_append arg " $func_quote_for_eval_result"
	  func_append compiler_flags " $func_quote_for_eval_result"
	}
	setvar IFS = "$save_ifs"
	func_stripname ' ' '' $arg
	setvar arg = "$func_stripname_result"
	}

      -Wl,* {
	func_stripname '-Wl,' '' $arg
	setvar args = "$func_stripname_result"
	setvar arg = ''
	setvar save_ifs = "$IFS"; setvar IFS = '',''
	for flag in $args {
	  setvar IFS = "$save_ifs"
          func_quote_for_eval $flag
	  func_append arg " $wl$func_quote_for_eval_result"
	  func_append compiler_flags " $wl$func_quote_for_eval_result"
	  func_append linker_flags " $func_quote_for_eval_result"
	}
	setvar IFS = "$save_ifs"
	func_stripname ' ' '' $arg
	setvar arg = "$func_stripname_result"
	}

      -Xcompiler {
	setvar prev = 'xcompiler'
	continue
	}

      -Xlinker {
	setvar prev = 'xlinker'
	continue
	}

      -XCClinker {
	setvar prev = 'xcclinker'
	continue
	}

      # -msg_* for osf cc
      -msg_* {
	func_quote_for_eval $arg
	setvar arg = "$func_quote_for_eval_result"
	}

      # Flags to be passed through unchanged, with rationale:
      # -64, -mips[0-9]      enable 64-bit mode for the SGI compiler
      # -r[0-9][0-9]*        specify processor for the SGI compiler
      # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler
      # +DA*, +DD*           enable 64-bit mode for the HP compiler
      # -q*                  compiler args for the IBM compiler
      # -m*, -t[45]*, -txscale* architecture-specific flags for GCC
      # -F/path              path to uninstalled frameworks, gcc on darwin
      # -p, -pg, --coverage, -fprofile-*  profiling flags for GCC
      # @file                GCC response files
      # -tp=*                Portland pgcc target processor selection
      # --sysroot=*          for sysroot support
      # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization
      -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \
      -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \
      -O*|-flto*|-fwhopr*|-fuse-linker-plugin {
        func_quote_for_eval $arg
	setvar arg = "$func_quote_for_eval_result"
        func_append compile_command " $arg"
        func_append finalize_command " $arg"
        func_append compiler_flags " $arg"
        continue
        }

      # Some other compiler flag.
      -* | +* {
        func_quote_for_eval $arg
	setvar arg = "$func_quote_for_eval_result"
	}

      *.$objext {
	# A standard object.
	func_append objs " $arg"
	}

      *.lo {
	# A libtool-controlled object.

	# Check to see that this really is a libtool object.
	if func_lalib_unsafe_p $arg {
	  setvar pic_object = ''
	  setvar non_pic_object = ''

	  # Read the .lo file
	  func_source $arg

	  if test -z $pic_object ||
	     test -z $non_pic_object ||
	     test $pic_object = none &&
	     test $non_pic_object = none {
	    func_fatal_error "cannot find name of object for \`$arg'"
	  }

	  # Extract subdirectory from the argument.
	  func_dirname $arg "/" ""
	  setvar xdir = "$func_dirname_result"

	  if test $pic_object != none {
	    # Prepend the subdirectory the object is found in.
	    setvar pic_object = ""$xdir$pic_object""

	    if test $prev = dlfiles {
	      if test $build_libtool_libs = yes && test $dlopen_support = yes {
		func_append dlfiles " $pic_object"
		setvar prev = ''
		continue
	      } else {
		# If libtool objects are unsupported, then we need to preload.
		setvar prev = 'dlprefiles'
	      }
	    }

	    # CHECK ME:  I think I busted this.  -Ossama
	    if test $prev = dlprefiles {
	      # Preload the old-style object.
	      func_append dlprefiles " $pic_object"
	      setvar prev = ''
	    }

	    # A PIC object.
	    func_append libobjs " $pic_object"
	    setvar arg = "$pic_object"
	  }

	  # Non-PIC object.
	  if test $non_pic_object != none {
	    # Prepend the subdirectory the object is found in.
	    setvar non_pic_object = ""$xdir$non_pic_object""

	    # A standard non-PIC object
	    func_append non_pic_objects " $non_pic_object"
	    if test -z $pic_object || test $pic_object = none  {
	      setvar arg = "$non_pic_object"
	    }
	  } else {
	    # If the PIC object exists, use it instead.
	    # $xdir was prepended to $pic_object above.
	    setvar non_pic_object = "$pic_object"
	    func_append non_pic_objects " $non_pic_object"
	  }
	} else {
	  # Only an error if not doing a dry-run.
	  if $opt_dry_run {
	    # Extract subdirectory from the argument.
	    func_dirname $arg "/" ""
	    setvar xdir = "$func_dirname_result"

	    func_lo2o $arg
	    setvar pic_object = "$xdir$objdir/$func_lo2o_result"
	    setvar non_pic_object = "$xdir$func_lo2o_result"
	    func_append libobjs " $pic_object"
	    func_append non_pic_objects " $non_pic_object"
	  } else {
	    func_fatal_error "\`$arg' is not a valid libtool object"
	  }
	}
	}

      *.$libext {
	# An archive.
	func_append deplibs " $arg"
	func_append old_deplibs " $arg"
	continue
	}

      *.la {
	# A libtool-controlled library.

	func_resolve_sysroot $arg
	if test $prev = dlfiles {
	  # This library was specified with -dlopen.
	  func_append dlfiles " $func_resolve_sysroot_result"
	  setvar prev = ''
	} elif test $prev = dlprefiles {
	  # The library was specified with -dlpreopen.
	  func_append dlprefiles " $func_resolve_sysroot_result"
	  setvar prev = ''
	} else {
	  func_append deplibs " $func_resolve_sysroot_result"
	}
	continue
	}

      # Some other compiler argument.
      * {
	# Unknown arguments in both finalize_command and compile_command need
	# to be aesthetically quoted because they are evaled later.
	func_quote_for_eval $arg
	setvar arg = "$func_quote_for_eval_result"
	}
      } # arg

      # Now actually substitute the argument into the commands.
      if test -n $arg {
	func_append compile_command " $arg"
	func_append finalize_command " $arg"
      }
    } # argument parsing loop

    test -n $prev && \
      func_fatal_help "the \`$prevarg' option requires an argument"

    if test $export_dynamic = yes && test -n $export_dynamic_flag_spec {
      eval arg='"'$export_dynamic_flag_spec'"'
      func_append compile_command " $arg"
      func_append finalize_command " $arg"
    }

    setvar oldlibs = ''
    # calculate the name of the file, without its directory
    func_basename $output
    setvar outputname = "$func_basename_result"
    setvar libobjs_save = "$libobjs"

    if test -n $shlibpath_var {
      # get the directories listed in $shlibpath_var
      eval shlib_search_path='`''$'ECHO '"''$'{$shlibpath_var}'"' '|' '$'SED '''s/:/ /g''''`'
    } else {
      setvar shlib_search_path = ''
    }
    eval sys_lib_search_path='"'$sys_lib_search_path_spec'"'
    eval sys_lib_dlsearch_path='"'$sys_lib_dlsearch_path_spec'"'

    func_dirname $output "/" ""
    setvar output_objdir = ""$func_dirname_result$objdir""
    func_to_tool_file "$output_objdir/"
    setvar tool_output_objdir = "$func_to_tool_file_result"
    # Create the object directory.
    func_mkdir_p $output_objdir

    # Determine the type of output
    case (output) {
    "" {
      func_fatal_help "you must specify an output file"
      }
    *.$libext { setvar linkmode = 'oldlib' }
    *.lo | *.$objext { setvar linkmode = 'obj' }
    *.la { setvar linkmode = 'lib' }
    * { setvar linkmode = 'prog' } # Anything else should be a program.
    }

    setvar specialdeplibs = ''

    setvar libs = ''
    # Find all interdependent deplibs by searching for libraries
    # that are linked more than once (e.g. -la -lb -la)
    for deplib in $deplibs {
      if $opt_preserve_dup_deps  {
	case{
	*" $deplib "* { func_append specialdeplibs " $deplib" }
	}
      }
      func_append libs " $deplib"
    }

    if test $linkmode = lib {
      setvar libs = ""$predeps $libs $compiler_lib_search_path $postdeps""

      # Compute libraries that are listed more than once in $predeps
      # $postdeps and mark them as special (i.e., whose duplicates are
      # not to be eliminated).
      setvar pre_post_deps = ''
      if $opt_duplicate_compiler_generated_deps {
	for pre_post_dep in $predeps $postdeps {
	  case{
	  *" $pre_post_dep "* { func_append specialdeplibs " $pre_post_deps" }
	  }
	  func_append pre_post_deps " $pre_post_dep"
	}
      }
      setvar pre_post_deps = ''
    }

    setvar deplibs = ''
    setvar newdependency_libs = ''
    setvar newlib_search_path = ''
    setvar need_relink = 'no' # whether we're linking any uninstalled libtool libraries
    setvar notinst_deplibs = '' # not-installed libtool libraries
    setvar notinst_path = '' # paths that contain not-installed libtool libraries

    case (linkmode) {
    lib {
	setvar passes = ""conv dlpreopen link""
	for file in $dlfiles $dlprefiles {
	  case (file) {
	  *.la { }
	  * {
	    func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file"
	    }
	  }
	}
	}
    prog {
	setvar compile_deplibs = ''
	setvar finalize_deplibs = ''
	setvar alldeplibs = 'no'
	setvar newdlfiles = ''
	setvar newdlprefiles = ''
	setvar passes = ""conv scan dlopen dlpreopen link""
	}
    * {  setvar passes = ""conv""
	}
    }

    for pass in $passes {
      # The preopen pass in lib mode reverses $deplibs; put it back here
      # so that -L comes before libs that need it for instance...
      if test "$linkmode,$pass" = "lib,link" {
	## FIXME: Find the place where the list is rebuilt in the wrong
	##        order, and fix it there properly
        setvar tmp_deplibs = ''
	for deplib in $deplibs {
	  setvar tmp_deplibs = ""$deplib $tmp_deplibs""
	}
	setvar deplibs = "$tmp_deplibs"
      }

      if test "$linkmode,$pass" = "lib,link" ||
	 test "$linkmode,$pass" = "prog,scan" {
	setvar libs = "$deplibs"
	setvar deplibs = ''
      }
      if test $linkmode = prog {
	case (pass) {
	dlopen { setvar libs = "$dlfiles" }
	dlpreopen { setvar libs = "$dlprefiles" }
	link {
	  setvar libs = ""$deplibs %DEPLIBS%""
	  test "X$link_all_deplibs" != Xno && setvar libs = ""$libs $dependency_libs""
	  }
	}
      }
      if test "$linkmode,$pass" = "lib,dlpreopen" {
	# Collect and forward deplibs of preopened libtool libs
	for lib in $dlprefiles {
	  # Ignore non-libtool-libs
	  setvar dependency_libs = ''
	  func_resolve_sysroot $lib
	  case (lib) {
	  *.la {	func_source $func_resolve_sysroot_result }
	  }

	  # Collect preopened libtool deplibs, except any this library
	  # has declared as weak libs
	  for deplib in $dependency_libs {
	    func_basename $deplib
            setvar deplib_base = "$func_basename_result"
	    case{
	    *" $deplib_base "* { }
	    * { func_append deplibs " $deplib" }
	    }
	  }
	}
	setvar libs = "$dlprefiles"
      }
      if test $pass = dlopen {
	# Collect dlpreopened libraries
	setvar save_deplibs = "$deplibs"
	setvar deplibs = ''
      }

      for deplib in $libs {
	setvar lib = ''
	setvar found = 'no'
	case (deplib) {
	-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
        |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=* {
	  if test "$linkmode,$pass" = "prog,link" {
	    setvar compile_deplibs = ""$deplib $compile_deplibs""
	    setvar finalize_deplibs = ""$deplib $finalize_deplibs""
	  } else {
	    func_append compiler_flags " $deplib"
	    if test $linkmode = lib  {
		case{
		    *" $deplib "* { }
		    *  { func_append new_inherited_linker_flags " $deplib" }
		}
	    }
	  }
	  continue
	  }
	-l* {
	  if test $linkmode != lib && test $linkmode != prog {
	    func_warning "\`-l' is ignored for archives/objects"
	    continue
	  }
	  func_stripname '-l' '' $deplib
	  setvar name = "$func_stripname_result"
	  if test $linkmode = lib {
	    setvar searchdirs = ""$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path""
	  } else {
	    setvar searchdirs = ""$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path""
	  }
	  for searchdir in $searchdirs {
	    for search_ext in .la $std_shrext .so .a {
	      # Search the libtool library
	      setvar lib = ""$searchdir/lib${name}${search_ext}""
	      if test -f $lib {
		if test $search_ext = ".la" {
		  setvar found = 'yes'
		} else {
		  setvar found = 'no'
		}
		break 2
	      }
	    }
	  }
	  if test $found != yes {
	    # deplib doesn't seem to be a libtool library
	    if test "$linkmode,$pass" = "prog,link" {
	      setvar compile_deplibs = ""$deplib $compile_deplibs""
	      setvar finalize_deplibs = ""$deplib $finalize_deplibs""
	    } else {
	      setvar deplibs = ""$deplib $deplibs""
	      test $linkmode = lib && setvar newdependency_libs = ""$deplib $newdependency_libs""
	    }
	    continue
	  } else { # deplib is a libtool library
	    # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib,
	    # We need to do some special things here, and not later.
	    if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes"  {
	      case{
	      *" $deplib "* {
		if func_lalib_p $lib {
		  setvar library_names = ''
		  setvar old_library = ''
		  func_source $lib
		  for l in $old_library $library_names {
		    setvar ll = "$l"
		  }
		  if test "X$ll" = "X$old_library"  { # only static version available
		    setvar found = 'no'
		    func_dirname $lib "" "."
		    setvar ladir = "$func_dirname_result"
		    setvar lib = "$ladir/$old_library"
		    if test "$linkmode,$pass" = "prog,link" {
		      setvar compile_deplibs = ""$deplib $compile_deplibs""
		      setvar finalize_deplibs = ""$deplib $finalize_deplibs""
		    } else {
		      setvar deplibs = ""$deplib $deplibs""
		      test $linkmode = lib && setvar newdependency_libs = ""$deplib $newdependency_libs""
		    }
		    continue
		  }
		}
		}
	      * { }
	      }
	    }
	  }
	  } # -l
	*.ltframework {
	  if test "$linkmode,$pass" = "prog,link" {
	    setvar compile_deplibs = ""$deplib $compile_deplibs""
	    setvar finalize_deplibs = ""$deplib $finalize_deplibs""
	  } else {
	    setvar deplibs = ""$deplib $deplibs""
	    if test $linkmode = lib  {
		case{
		    *" $deplib "* { }
		    *  { func_append new_inherited_linker_flags " $deplib" }
		}
	    }
	  }
	  continue
	  }
	-L* {
	  case (linkmode) {
	  lib {
	    setvar deplibs = ""$deplib $deplibs""
	    test $pass = conv && continue
	    setvar newdependency_libs = ""$deplib $newdependency_libs""
	    func_stripname '-L' '' $deplib
	    func_resolve_sysroot $func_stripname_result
	    func_append newlib_search_path " $func_resolve_sysroot_result"
	    }
	  prog {
	    if test $pass = conv {
	      setvar deplibs = ""$deplib $deplibs""
	      continue
	    }
	    if test $pass = scan {
	      setvar deplibs = ""$deplib $deplibs""
	    } else {
	      setvar compile_deplibs = ""$deplib $compile_deplibs""
	      setvar finalize_deplibs = ""$deplib $finalize_deplibs""
	    }
	    func_stripname '-L' '' $deplib
	    func_resolve_sysroot $func_stripname_result
	    func_append newlib_search_path " $func_resolve_sysroot_result"
	    }
	  * {
	    func_warning "\`-L' is ignored for archives/objects"
	    }
	  } # linkmode
	  continue
	  } # -L
	-R* {
	  if test $pass = link {
	    func_stripname '-R' '' $deplib
	    func_resolve_sysroot $func_stripname_result
	    setvar dir = "$func_resolve_sysroot_result"
	    # Make sure the xrpath contains only unique directories.
	    case{
	    *" $dir "* { }
	    * { func_append xrpath " $dir" }
	    }
	  }
	  setvar deplibs = ""$deplib $deplibs""
	  continue
	  }
	*.la {
	  func_resolve_sysroot $deplib
	  setvar lib = "$func_resolve_sysroot_result"
	  }
	*.$libext {
	  if test $pass = conv {
	    setvar deplibs = ""$deplib $deplibs""
	    continue
	  }
	  case (linkmode) {
	  lib {
	    # Linking convenience modules into shared libraries is allowed,
	    # but linking other static libraries is non-portable.
	    case{
	    *" $deplib "* { }
	    * {
	      setvar valid_a_lib = 'no'
	      case (deplibs_check_method) {
		match_pattern* {
		  set dummy $deplibs_check_method; shift
		  setvar match_pattern_regex = $(expr $deplibs_check_method : "$1 \(.*\))
		  if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \
		    | $EGREP $match_pattern_regex > /dev/null {
		    setvar valid_a_lib = 'yes'
		  }
		}
		pass_all {
		  setvar valid_a_lib = 'yes'
		}
	      }
	      if test $valid_a_lib != yes {
		echo
		$ECHO "*** Warning: Trying to link with static lib archive $deplib."
		echo "*** I have the capability to make that library automatically link in when"
		echo "*** you link to this library.  But I can only do this if you have a"
		echo "*** shared version of the library, which you do not appear to have"
		echo "*** because the file extensions .$libext of this argument makes me believe"
		echo "*** that it is just a static archive that I should not use here."
	      } else {
		echo
		$ECHO "*** Warning: Linking the shared library $output against the"
		$ECHO "*** static library $deplib is not portable!"
		setvar deplibs = ""$deplib $deplibs""
	      }
	      }
	    }
	    continue
	    }
	  prog {
	    if test $pass != link {
	      setvar deplibs = ""$deplib $deplibs""
	    } else {
	      setvar compile_deplibs = ""$deplib $compile_deplibs""
	      setvar finalize_deplibs = ""$deplib $finalize_deplibs""
	    }
	    continue
	    }
	  } # linkmode
	  } # *.$libext
	*.lo | *.$objext {
	  if test $pass = conv {
	    setvar deplibs = ""$deplib $deplibs""
	  } elif test $linkmode = prog {
	    if test $pass = dlpreopen || test $dlopen_support != yes || test $build_libtool_libs = no {
	      # If there is no dlopen support or we're linking statically,
	      # we need to preload.
	      func_append newdlprefiles " $deplib"
	      setvar compile_deplibs = ""$deplib $compile_deplibs""
	      setvar finalize_deplibs = ""$deplib $finalize_deplibs""
	    } else {
	      func_append newdlfiles " $deplib"
	    }
	  }
	  continue
	  }
	%DEPLIBS% {
	  setvar alldeplibs = 'yes'
	  continue
	  }
	} # case $deplib

	if test $found = yes || test -f $lib { :
	} else {
	  func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'"
	}

	# Check to see that this really is a libtool archive.
	func_lalib_unsafe_p $lib \
	  || func_fatal_error "\`$lib' is not a valid libtool archive"

	func_dirname $lib "" "."
	setvar ladir = "$func_dirname_result"

	setvar dlname = ''
	setvar dlopen = ''
	setvar dlpreopen = ''
	setvar libdir = ''
	setvar library_names = ''
	setvar old_library = ''
	setvar inherited_linker_flags = ''
	# If the library was installed with an old release of libtool,
	# it will not redefine variables installed, or shouldnotlink
	setvar installed = 'yes'
	setvar shouldnotlink = 'no'
	setvar avoidtemprpath = ''


	# Read the .la file
	func_source $lib

	# Convert "-framework foo" to "foo.ltframework"
	if test -n $inherited_linker_flags {
	  setvar tmp_inherited_linker_flags = $($ECHO $inherited_linker_flags | $SED 's/-framework \([^ $]*\)/\1.ltframework/g)
	  for tmp_inherited_linker_flag in $tmp_inherited_linker_flags {
	    case{
	      *" $tmp_inherited_linker_flag "* { }
	      * { func_append new_inherited_linker_flags " $tmp_inherited_linker_flag"}
	    }
	  }
	}
	setvar dependency_libs = $($ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g)
	if test "$linkmode,$pass" = "lib,link" ||
	   test "$linkmode,$pass" = "prog,scan" ||
	   do { test $linkmode != prog && test $linkmode != lib; } {
	  test -n $dlopen && func_append dlfiles " $dlopen"
	  test -n $dlpreopen && func_append dlprefiles " $dlpreopen"
	}

	if test $pass = conv {
	  # Only check for convenience libraries
	  setvar deplibs = ""$lib $deplibs""
	  if test -z $libdir {
	    if test -z $old_library {
	      func_fatal_error "cannot find name of link library for \`$lib'"
	    }
	    # It is a libtool convenience library, so add in its objects.
	    func_append convenience " $ladir/$objdir/$old_library"
	    func_append old_convenience " $ladir/$objdir/$old_library"
	    setvar tmp_libs = ''
	    for deplib in $dependency_libs {
	      setvar deplibs = ""$deplib $deplibs""
	      if $opt_preserve_dup_deps  {
		case{
		*" $deplib "* { func_append specialdeplibs " $deplib" }
		}
	      }
	      func_append tmp_libs " $deplib"
	    }
	  } elif test $linkmode != prog && test $linkmode != lib {
	    func_fatal_error "\`$lib' is not a convenience library"
	  }
	  continue
	} # $pass = conv


	# Get the name of the library we link against.
	setvar linklib = ''
	if test -n $old_library &&
	   do { test $prefer_static_libs = yes ||
	     test "$prefer_static_libs,$installed" = "built,no"; } {
	  setvar linklib = "$old_library"
	} else {
	  for l in $old_library $library_names {
	    setvar linklib = "$l"
	  }
	}
	if test -z $linklib {
	  func_fatal_error "cannot find name of link library for \`$lib'"
	}

	# This library was specified with -dlopen.
	if test $pass = dlopen {
	  if test -z $libdir {
	    func_fatal_error "cannot -dlopen a convenience library: \`$lib'"
	  }
	  if test -z $dlname ||
	     test $dlopen_support != yes ||
	     test $build_libtool_libs = no {
	    # If there is no dlname, no dlopen support or we're linking
	    # statically, we need to preload.  We also need to preload any
	    # dependent libraries so libltdl's deplib preloader doesn't
	    # bomb out in the load deplibs phase.
	    func_append dlprefiles " $lib $dependency_libs"
	  } else {
	    func_append newdlfiles " $lib"
	  }
	  continue
	} # $pass = dlopen

	# We need an absolute path.
	case (ladir) {
	[\\/]* | [A-Za-z]:[\\/]* { setvar abs_ladir = "$ladir" }
	* {
	  setvar abs_ladir = $(cd $ladir && pwd)
	  if test -z $abs_ladir {
	    func_warning "cannot determine absolute directory name of \`$ladir'"
	    func_warning "passing it literally to the linker, although it might fail"
	    setvar abs_ladir = "$ladir"
	  }
	  }
	}
	func_basename $lib
	setvar laname = "$func_basename_result"

	# Find the relevant object directory and library name.
	if test "X$installed" = Xyes {
	  if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib" {
	    func_warning "library \`$lib' was moved."
	    setvar dir = "$ladir"
	    setvar absdir = "$abs_ladir"
	    setvar libdir = "$abs_ladir"
	  } else {
	    setvar dir = ""$lt_sysroot$libdir""
	    setvar absdir = ""$lt_sysroot$libdir""
	  }
	  test "X$hardcode_automatic" = Xyes && setvar avoidtemprpath = 'yes'
	} else {
	  if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib" {
	    setvar dir = "$ladir"
	    setvar absdir = "$abs_ladir"
	    # Remove this search path later
	    func_append notinst_path " $abs_ladir"
	  } else {
	    setvar dir = ""$ladir/$objdir""
	    setvar absdir = ""$abs_ladir/$objdir""
	    # Remove this search path later
	    func_append notinst_path " $abs_ladir"
	  }
	} # $installed = yes
	func_stripname 'lib' '.la' $laname
	setvar name = "$func_stripname_result"

	# This library was specified with -dlpreopen.
	if test $pass = dlpreopen {
	  if test -z $libdir && test $linkmode = prog {
	    func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'"
	  }
	  case (host) {
	    # special handling for platforms with PE-DLLs.
	    *cygwin* | *mingw* | *cegcc*  {
	      # Linker will automatically link against shared library if both
	      # static and shared are present.  Therefore, ensure we extract
	      # symbols from the import library if a shared library is present
	      # (otherwise, the dlopen module name will be incorrect).  We do
	      # this by putting the import library name into $newdlprefiles.
	      # We recover the dlopen module name by 'saving' the la file
	      # name in a special purpose variable, and (later) extracting the
	      # dlname from the la file.
	      if test -n $dlname {
	        func_tr_sh "$dir/$linklib"
	        eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname"
	        func_append newdlprefiles " $dir/$linklib"
	      } else {
	        func_append newdlprefiles " $dir/$old_library"
	        # Keep a list of preopened convenience libraries to check
	        # that they are being used correctly in the link pass.
	        test -z $libdir && \
	          func_append dlpreconveniencelibs " $dir/$old_library"
	      }
	    }
	    *  {
	      # Prefer using a static library (so that no silly _DYNAMIC symbols
	      # are required to link).
	      if test -n $old_library {
	        func_append newdlprefiles " $dir/$old_library"
	        # Keep a list of preopened convenience libraries to check
	        # that they are being used correctly in the link pass.
	        test -z $libdir && \
	          func_append dlpreconveniencelibs " $dir/$old_library"
	      # Otherwise, use the dlname, so that lt_dlopen finds it.
	      } elif test -n $dlname {
	        func_append newdlprefiles " $dir/$dlname"
	      } else {
	        func_append newdlprefiles " $dir/$linklib"
	      }
	    }
	  }
	} # $pass = dlpreopen

	if test -z $libdir {
	  # Link the convenience library
	  if test $linkmode = lib {
	    setvar deplibs = ""$dir/$old_library $deplibs""
	  } elif test "$linkmode,$pass" = "prog,link" {
	    setvar compile_deplibs = ""$dir/$old_library $compile_deplibs""
	    setvar finalize_deplibs = ""$dir/$old_library $finalize_deplibs""
	  } else {
	    setvar deplibs = ""$lib $deplibs"" # used for prog,scan pass
	  }
	  continue
	}


	if test $linkmode = prog && test $pass != link {
	  func_append newlib_search_path " $ladir"
	  setvar deplibs = ""$lib $deplibs""

	  setvar linkalldeplibs = 'no'
	  if test $link_all_deplibs != no || test -z $library_names ||
	     test $build_libtool_libs = no {
	    setvar linkalldeplibs = 'yes'
	  }

	  setvar tmp_libs = ''
	  for deplib in $dependency_libs {
	    case (deplib) {
	    -L* { func_stripname '-L' '' $deplib
	         func_resolve_sysroot $func_stripname_result
	         func_append newlib_search_path " $func_resolve_sysroot_result"
		 }
	    }
	    # Need to link against all dependency_libs?
	    if test $linkalldeplibs = yes {
	      setvar deplibs = ""$deplib $deplibs""
	    } else {
	      # Need to hardcode shared library paths
	      # or/and link against static libraries
	      setvar newdependency_libs = ""$deplib $newdependency_libs""
	    }
	    if $opt_preserve_dup_deps  {
	      case{
	      *" $deplib "* { func_append specialdeplibs " $deplib" }
	      }
	    }
	    func_append tmp_libs " $deplib"
	  } # for deplib
	  continue
	} # $linkmode = prog...

	if test "$linkmode,$pass" = "prog,link" {
	  if test -n $library_names &&
	     do { do { test $prefer_static_libs = no ||
	         test "$prefer_static_libs,$installed" = "built,yes"; } ||
	       test -z $old_library; } {
	    # We need to hardcode the library path
	    if test -n $shlibpath_var && test -z $avoidtemprpath  {
	      # Make sure the rpath contains only unique directories.
	      case{
	      *"$absdir:"* { }
	      * { func_append temp_rpath "$absdir:" }
	      }
	    }

	    # Hardcode the library path.
	    # Skip directories that are in the system default run-time
	    # search path.
	    case{
	    *" $absdir "* { }
	    * {
	      case{
	      *" $absdir "* { }
	      * { func_append compile_rpath " $absdir" }
	      }
	      }
	    }
	    case{
	    *" $libdir "* { }
	    * {
	      case{
	      *" $libdir "* { }
	      * { func_append finalize_rpath " $libdir" }
	      }
	      }
	    }
	  } # $linkmode,$pass = prog,link...

	  if test $alldeplibs = yes &&
	     do { test $deplibs_check_method = pass_all ||
	       do { test $build_libtool_libs = yes &&
		 test -n $library_names; }; } {
	    # We only need to search for static libraries
	    continue
	  }
	}

	setvar link_static = 'no' # Whether the deplib will be linked statically
	setvar use_static_libs = "$prefer_static_libs"
	if test $use_static_libs = built && test $installed = yes {
	  setvar use_static_libs = 'no'
	}
	if test -n $library_names &&
	   do { test $use_static_libs = no || test -z $old_library; } {
	  case (host) {
	  *cygwin* | *mingw* | *cegcc* {
	      # No point in relinking DLLs because paths are not encoded
	      func_append notinst_deplibs " $lib"
	      setvar need_relink = 'no'
	    }
	  * {
	    if test $installed = no {
	      func_append notinst_deplibs " $lib"
	      setvar need_relink = 'yes'
	    }
	    }
	  }
	  # This is a shared library

	  # Warn about portability, can't link against -module's on some
	  # systems (darwin).  Don't bleat about dlopened modules though!
	  setvar dlopenmodule = """"
	  for dlpremoduletest in $dlprefiles {
	    if test "X$dlpremoduletest" = "X$lib" {
	      setvar dlopenmodule = "$dlpremoduletest"
	      break
	    }
	  }
	  if test -z $dlopenmodule && test $shouldnotlink = yes && test $pass = link {
	    echo
	    if test $linkmode = prog {
	      $ECHO "*** Warning: Linking the executable $output against the loadable module"
	    } else {
	      $ECHO "*** Warning: Linking the shared library $output against the loadable module"
	    }
	    $ECHO "*** $linklib is not portable!"
	  }
	  if test $linkmode = lib &&
	     test $hardcode_into_libs = yes {
	    # Hardcode the library path.
	    # Skip directories that are in the system default run-time
	    # search path.
	    case{
	    *" $absdir "* { }
	    * {
	      case{
	      *" $absdir "* { }
	      * { func_append compile_rpath " $absdir" }
	      }
	      }
	    }
	    case{
	    *" $libdir "* { }
	    * {
	      case{
	      *" $libdir "* { }
	      * { func_append finalize_rpath " $libdir" }
	      }
	      }
	    }
	  }

	  if test -n $old_archive_from_expsyms_cmds {
	    # figure out the soname
	    set dummy $library_names
	    shift
	    setvar realname = "$1"
	    shift
	    setvar libname = $(eval "\\$ECHO \"$libname_spec)
	    # use dlname if we got it. it's perfectly good, no?
	    if test -n $dlname {
	      setvar soname = "$dlname"
	    } elif test -n $soname_spec {
	      # bleh windows
	      case (host) {
	      *cygwin* | mingw* | *cegcc* {
	        func_arith $current - $age
		setvar major = "$func_arith_result"
		setvar versuffix = ""-$major""
		}
	      }
	      eval soname='"'$soname_spec'"'
	    } else {
	      setvar soname = "$realname"
	    }

	    # Make a new name for the extract_expsyms_cmds to use
	    setvar soroot = "$soname"
	    func_basename $soroot
	    setvar soname = "$func_basename_result"
	    func_stripname 'lib' '.dll' $soname
	    setvar newlib = "libimp-$func_stripname_result.a"

	    # If the library has no export list, then create one now
	    if test -f "$output_objdir/$soname-def" { :
	    } else {
	      func_verbose "extracting exported symbol list from \`$soname'"
	      func_execute_cmds $extract_expsyms_cmds 'exit $?'
	    }

	    # Create $newlib
	    if test -f "$output_objdir/$newlib" { :; } else {
	      func_verbose "generating import library for \`$soname'"
	      func_execute_cmds $old_archive_from_expsyms_cmds 'exit $?'
	    }
	    # make sure the library variables are pointing to the new library
	    setvar dir = "$output_objdir"
	    setvar linklib = "$newlib"
	  } # test -n "$old_archive_from_expsyms_cmds"

	  if test $linkmode = prog || test $opt_mode != relink {
	    setvar add_shlibpath = ''
	    setvar add_dir = ''
	    setvar add = ''
	    setvar lib_linked = 'yes'
	    case (hardcode_action) {
	    immediate | unsupported {
	      if test $hardcode_direct = no {
		setvar add = ""$dir/$linklib""
		case (host) {
		  *-*-sco3.2v5.0.[024]* { setvar add_dir = ""-L$dir"" }
		  *-*-sysv4*uw2* { setvar add_dir = ""-L$dir"" }
		  *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \
		    *-*-unixware7* { setvar add_dir = ""-L$dir"" }
		  *-*-darwin*  {
		    # if the lib is a (non-dlopened) module then we can not
		    # link against it, someone is ignoring the earlier warnings
		    if /usr/bin/file -L $add 2> /dev/null |
			 $GREP ": [^:]* bundle" >/dev/null  {
		      if test "X$dlopenmodule" != "X$lib" {
			$ECHO "*** Warning: lib $linklib is a module, not a shared library"
			if test -z $old_library  {
			  echo
			  echo "*** And there doesn't seem to be a static archive available"
			  echo "*** The link will probably fail, sorry"
			} else {
			  setvar add = ""$dir/$old_library""
			}
		      } elif test -n $old_library {
			setvar add = ""$dir/$old_library""
		      }
		    }
		}
}
	      } elif test $hardcode_minus_L = no {
		case (host) {
		*-*-sunos* { setvar add_shlibpath = "$dir" }
		}
		setvar add_dir = ""-L$dir""
		setvar add = ""-l$name""
	      } elif test $hardcode_shlibpath_var = no {
		setvar add_shlibpath = "$dir"
		setvar add = ""-l$name""
	      } else {
		setvar lib_linked = 'no'
	      }
	      }
	    relink {
	      if test $hardcode_direct = yes &&
	         test $hardcode_direct_absolute = no {
		setvar add = ""$dir/$linklib""
	      } elif test $hardcode_minus_L = yes {
		setvar add_dir = ""-L$absdir""
		# Try looking first in the location we're being installed to.
		if test -n $inst_prefix_dir {
		  case (libdir) {
		    [\\/]* {
		      func_append add_dir " -L$inst_prefix_dir$libdir"
		      }
		  }
		}
		setvar add = ""-l$name""
	      } elif test $hardcode_shlibpath_var = yes {
		setvar add_shlibpath = "$dir"
		setvar add = ""-l$name""
	      } else {
		setvar lib_linked = 'no'
	      }
	      }
	    * { setvar lib_linked = 'no' }
	    }

	    if test $lib_linked != yes {
	      func_fatal_configuration "unsupported hardcode properties"
	    }

	    if test -n $add_shlibpath {
	      case{
	      *":$add_shlibpath:"* { }
	      * { func_append compile_shlibpath "$add_shlibpath:" }
	      }
	    }
	    if test $linkmode = prog {
	      test -n $add_dir && setvar compile_deplibs = ""$add_dir $compile_deplibs""
	      test -n $add && setvar compile_deplibs = ""$add $compile_deplibs""
	    } else {
	      test -n $add_dir && setvar deplibs = ""$add_dir $deplibs""
	      test -n $add && setvar deplibs = ""$add $deplibs""
	      if test $hardcode_direct != yes &&
		 test $hardcode_minus_L != yes &&
		 test $hardcode_shlibpath_var = yes {
		case{
		*":$libdir:"* { }
		* { func_append finalize_shlibpath "$libdir:" }
		}
	      }
	    }
	  }

	  if test $linkmode = prog || test $opt_mode = relink {
	    setvar add_shlibpath = ''
	    setvar add_dir = ''
	    setvar add = ''
	    # Finalize command for both is simple: just hardcode it.
	    if test $hardcode_direct = yes &&
	       test $hardcode_direct_absolute = no {
	      setvar add = ""$libdir/$linklib""
	    } elif test $hardcode_minus_L = yes {
	      setvar add_dir = ""-L$libdir""
	      setvar add = ""-l$name""
	    } elif test $hardcode_shlibpath_var = yes {
	      case{
	      *":$libdir:"* { }
	      * { func_append finalize_shlibpath "$libdir:" }
	      }
	      setvar add = ""-l$name""
	    } elif test $hardcode_automatic = yes {
	      if test -n $inst_prefix_dir &&
		 test -f "$inst_prefix_dir$libdir/$linklib"  {
		setvar add = ""$inst_prefix_dir$libdir/$linklib""
	      } else {
		setvar add = ""$libdir/$linklib""
	      }
	    } else {
	      # We cannot seem to hardcode it, guess we'll fake it.
	      setvar add_dir = ""-L$libdir""
	      # Try looking first in the location we're being installed to.
	      if test -n $inst_prefix_dir {
		case (libdir) {
		  [\\/]* {
		    func_append add_dir " -L$inst_prefix_dir$libdir"
		    }
		}
	      }
	      setvar add = ""-l$name""
	    }

	    if test $linkmode = prog {
	      test -n $add_dir && setvar finalize_deplibs = ""$add_dir $finalize_deplibs""
	      test -n $add && setvar finalize_deplibs = ""$add $finalize_deplibs""
	    } else {
	      test -n $add_dir && setvar deplibs = ""$add_dir $deplibs""
	      test -n $add && setvar deplibs = ""$add $deplibs""
	    }
	  }
	} elif test $linkmode = prog {
	  # Here we assume that one of hardcode_direct or hardcode_minus_L
	  # is not unsupported.  This is valid on all known static and
	  # shared platforms.
	  if test $hardcode_direct != unsupported {
	    test -n $old_library && setvar linklib = "$old_library"
	    setvar compile_deplibs = ""$dir/$linklib $compile_deplibs""
	    setvar finalize_deplibs = ""$dir/$linklib $finalize_deplibs""
	  } else {
	    setvar compile_deplibs = ""-l$name -L$dir $compile_deplibs""
	    setvar finalize_deplibs = ""-l$name -L$dir $finalize_deplibs""
	  }
	} elif test $build_libtool_libs = yes {
	  # Not a shared library
	  if test $deplibs_check_method != pass_all {
	    # We're trying link a shared library against a static one
	    # but the system doesn't support it.

	    # Just print a warning and add the library to dependency_libs so
	    # that the program can be linked against the static library.
	    echo
	    $ECHO "*** Warning: This system can not link to static lib archive $lib."
	    echo "*** I have the capability to make that library automatically link in when"
	    echo "*** you link to this library.  But I can only do this if you have a"
	    echo "*** shared version of the library, which you do not appear to have."
	    if test $module = yes {
	      echo "*** But as you try to build a module library, libtool will still create "
	      echo "*** a static module, that should work as long as the dlopening application"
	      echo "*** is linked with the -dlopen flag to resolve symbols at runtime."
	      if test -z $global_symbol_pipe {
		echo
		echo "*** However, this would only work if libtool was able to extract symbol"
		echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
		echo "*** not find such a program.  So, this module is probably useless."
		echo "*** \`nm' from GNU binutils and a full rebuild may help."
	      }
	      if test $build_old_libs = no {
		setvar build_libtool_libs = 'module'
		setvar build_old_libs = 'yes'
	      } else {
		setvar build_libtool_libs = 'no'
	      }
	    }
	  } else {
	    setvar deplibs = ""$dir/$old_library $deplibs""
	    setvar link_static = 'yes'
	  }
	} # link shared/static library?

	if test $linkmode = lib {
	  if test -n $dependency_libs &&
	     do { test $hardcode_into_libs != yes ||
	       test $build_old_libs = yes ||
	       test $link_static = yes; } {
	    # Extract -R from dependency_libs
	    setvar temp_deplibs = ''
	    for libdir in $dependency_libs {
	      case (libdir) {
	      -R* { func_stripname '-R' '' $libdir
	           setvar temp_xrpath = "$func_stripname_result"
		   case{
		   *" $temp_xrpath "* { }
		   * { func_append xrpath " $temp_xrpath"}
		   }}
	      * { func_append temp_deplibs " $libdir"}
	      }
	    }
	    setvar dependency_libs = "$temp_deplibs"
	  }

	  func_append newlib_search_path " $absdir"
	  # Link against this library
	  test $link_static = no && setvar newdependency_libs = ""$abs_ladir/$laname $newdependency_libs""
	  # ... and its dependency_libs
	  setvar tmp_libs = ''
	  for deplib in $dependency_libs {
	    setvar newdependency_libs = ""$deplib $newdependency_libs""
	    case (deplib) {
              -L* { func_stripname '-L' '' $deplib
                   func_resolve_sysroot $func_stripname_result}
              * { func_resolve_sysroot $deplib }
            }
	    if $opt_preserve_dup_deps  {
	      case{
	      *" $func_resolve_sysroot_result "* {
                func_append specialdeplibs " $func_resolve_sysroot_result" }
	      }
	    }
	    func_append tmp_libs " $func_resolve_sysroot_result"
	  }

	  if test $link_all_deplibs != no {
	    # Add the search paths of all dependency libraries
	    for deplib in $dependency_libs {
	      setvar path = ''
	      case (deplib) {
	      -L* { setvar path = "$deplib" }
	      *.la {
	        func_resolve_sysroot $deplib
	        setvar deplib = "$func_resolve_sysroot_result"
	        func_dirname $deplib "" "."
		setvar dir = "$func_dirname_result"
		# We need an absolute path.
		case (dir) {
		[\\/]* | [A-Za-z]:[\\/]* { setvar absdir = "$dir" }
		* {
		  setvar absdir = $(cd $dir && pwd)
		  if test -z $absdir {
		    func_warning "cannot determine absolute directory name of \`$dir'"
		    setvar absdir = "$dir"
		  }
		  }
		}
		if $GREP "^installed=no" $deplib > /dev/null {
		case (host) {
		*-*-darwin* {
		  setvar depdepl = ''
		  eval deplibrary_names=$(${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib)
		  if test -n $deplibrary_names  {
		    for tmp in $deplibrary_names  {
		      setvar depdepl = "$tmp"
		    }
		    if test -f "$absdir/$objdir/$depdepl"  {
		      setvar depdepl = ""$absdir/$objdir/$depdepl""
		      setvar darwin_install_name = $(${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}})
                      if test -z $darwin_install_name {
                          setvar darwin_install_name = $(${OTOOL64} -L $depdepl  | awk '{if (NR == 2) {print $1;exit}})
                      }
		      func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}"
		      func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}"
		      setvar path = ''
		    }
		  }
		  }
		* {
		  setvar path = ""-L$absdir/$objdir""
		  }
		}
		} else {
		  eval libdir=$(${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib)
		  test -z $libdir && \
		    func_fatal_error "\`$deplib' is not a valid libtool archive"
		  test $absdir != $libdir && \
		    func_warning "\`$deplib' seems to be moved"

		  setvar path = ""-L$absdir""
		}
		}
	      }
	      case{
	      *" $path "* { }
	      * { setvar deplibs = ""$path $deplibs"" }
	      }
	    }
	  } # link_all_deplibs != no
	} # linkmode = lib
      } # for deplib in $libs
      if test $pass = link {
	if test $linkmode = "prog" {
	  setvar compile_deplibs = ""$new_inherited_linker_flags $compile_deplibs""
	  setvar finalize_deplibs = ""$new_inherited_linker_flags $finalize_deplibs""
	} else {
	  setvar compiler_flags = ""$compiler_flags "$($ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g)"
	}
      }
      setvar dependency_libs = "$newdependency_libs"
      if test $pass = dlpreopen {
	# Link the dlpreopened libraries before other libraries
	for deplib in $save_deplibs {
	  setvar deplibs = ""$deplib $deplibs""
	}
      }
      if test $pass != dlopen {
	if test $pass != conv {
	  # Make sure lib_search_path contains only unique directories.
	  setvar lib_search_path = ''
	  for dir in $newlib_search_path {
	    case{
	    *" $dir "* { }
	    * { func_append lib_search_path " $dir" }
	    }
	  }
	  setvar newlib_search_path = ''
	}

	if test "$linkmode,$pass" != "prog,link" {
	  setvar vars = ""deplibs""
	} else {
	  setvar vars = ""compile_deplibs finalize_deplibs""
	}
	for var in $vars dependency_libs {
	  # Add libraries to $var in reverse order
	  eval tmp_libs='"''$'$var'"'
	  setvar new_libs = ''
	  for deplib in $tmp_libs {
	    # FIXME: Pedantically, this is the right thing to do, so
	    #        that some nasty dependency loop isn't accidentally
	    #        broken:
	    #new_libs="$deplib $new_libs"
	    # Pragmatically, this seems to cause very few problems in
	    # practice:
	    case (deplib) {
	    -L* { setvar new_libs = ""$deplib $new_libs"" }
	    -R* { }
	    * {
	      # And here is the reason: when a library appears more
	      # than once as an explicit dependence of a library, or
	      # is implicitly linked in more than once by the
	      # compiler, it is considered special, and multiple
	      # occurrences thereof are not removed.  Compare this
	      # with having the same library being listed as a
	      # dependency of multiple other libraries: in this case,
	      # we know (pedantically, we assume) the library does not
	      # need to be listed more than once, so we keep only the
	      # last copy.  This is not always right, but it is rare
	      # enough that we require users that really mean to play
	      # such unportable linking tricks to link the library
	      # using -Wl,-lname, so that libtool does not consider it
	      # for duplicate removal.
	      case{
	      *" $deplib "* { setvar new_libs = ""$deplib $new_libs"" }
	      * {
		case{
		*" $deplib "* { }
		* { setvar new_libs = ""$deplib $new_libs"" }
		}
		}
	      }
	      }
	    }
	  }
	  setvar tmp_libs = ''
	  for deplib in $new_libs {
	    case (deplib) {
	    -L* {
	      case{
	      *" $deplib "* { }
	      * { func_append tmp_libs " $deplib" }
	      }
	      }
	    * { func_append tmp_libs " $deplib" }
	    }
	  }
	  eval $var='"'$tmp_libs'"'
	} # for var
      }
      # Last step: remove runtime libs from dependency_libs
      # (they stay in deplibs)
      setvar tmp_libs = ''
      for i in $dependency_libs  {
	case{
	*" $i "* {
	  setvar i = """"
	  }
	}
	if test -n $i  {
	  func_append tmp_libs " $i"
	}
      }
      setvar dependency_libs = "$tmp_libs"
    } # for pass
    if test $linkmode = prog {
      setvar dlfiles = "$newdlfiles"
    }
    if test $linkmode = prog || test $linkmode = lib {
      setvar dlprefiles = "$newdlprefiles"
    }

    case (linkmode) {
    oldlib {
      if test -n "$dlfiles$dlprefiles" || test $dlself != no {
	func_warning "\`-dlopen' is ignored for archives"
      }

      case{
      *\ -l* | *\ -L* {
	func_warning "\`-l' and \`-L' are ignored for archives" }
      }

      test -n $rpath && \
	func_warning "\`-rpath' is ignored for archives"

      test -n $xrpath && \
	func_warning "\`-R' is ignored for archives"

      test -n $vinfo && \
	func_warning "\`-version-info/-version-number' is ignored for archives"

      test -n $release && \
	func_warning "\`-release' is ignored for archives"

      test -n "$export_symbols$export_symbols_regex" && \
	func_warning "\`-export-symbols' is ignored for archives"

      # Now set the variables for building old libraries.
      setvar build_libtool_libs = 'no'
      setvar oldlibs = "$output"
      func_append objs $old_deplibs
      }

    lib {
      # Make sure we only generate libraries of the form `libNAME.la'.
      case (outputname) {
      lib* {
	func_stripname 'lib' '.la' $outputname
	setvar name = "$func_stripname_result"
	eval shared_ext='"'$shrext_cmds'"'
	eval libname='"'$libname_spec'"'
	}
      * {
	test $module = no && \
	  func_fatal_help "libtool library \`$output' must begin with \`lib'"

	if test $need_lib_prefix != no {
	  # Add the "lib" prefix for modules if required
	  func_stripname '' '.la' $outputname
	  setvar name = "$func_stripname_result"
	  eval shared_ext='"'$shrext_cmds'"'
	  eval libname='"'$libname_spec'"'
	} else {
	  func_stripname '' '.la' $outputname
	  setvar libname = "$func_stripname_result"
	}
	}
      }

      if test -n $objs {
	if test $deplibs_check_method != pass_all {
	  func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs"
	} else {
	  echo
	  $ECHO "*** Warning: Linking the shared library $output against the non-libtool"
	  $ECHO "*** objects $objs is not portable!"
	  func_append libobjs " $objs"
	}
      }

      test $dlself != no && \
	func_warning "\`-dlopen self' is ignored for libtool libraries"

      set dummy $rpath
      shift
      test "$Argc" -gt 1 && \
	func_warning "ignoring multiple \`-rpath's for a libtool library"

      setvar install_libdir = "$1"

      setvar oldlibs = ''
      if test -z $rpath {
	if test $build_libtool_libs = yes {
	  # Building a libtool convenience library.
	  # Some compilers have problems with a `.al' extension so
	  # convenience libraries should have the same extension an
	  # archive normally would.
	  setvar oldlibs = ""$output_objdir/$libname.$libext $oldlibs""
	  setvar build_libtool_libs = 'convenience'
	  setvar build_old_libs = 'yes'
	}

	test -n $vinfo && \
	  func_warning "\`-version-info/-version-number' is ignored for convenience libraries"

	test -n $release && \
	  func_warning "\`-release' is ignored for convenience libraries"
      } else {

	# Parse the version information argument.
	setvar save_ifs = "$IFS"; setvar IFS = '':''
	set dummy $vinfo 0 0 0
	shift
	setvar IFS = "$save_ifs"

	test -n $7 && \
	  func_fatal_help "too many parameters to \`-version-info'"

	# convert absolute version numbers to libtool ages
	# this retains compatibility with .la files and attempts
	# to make the code below a bit more comprehensible

	case (vinfo_number) {
	yes {
	  setvar number_major = "$1"
	  setvar number_minor = "$2"
	  setvar number_revision = "$3"
	  #
	  # There are really only two kinds -- those that
	  # use the current revision as the major version
	  # and those that subtract age and use age as
	  # a minor version.  But, then there is irix
	  # which has an extra 1 added just for fun
	  #
	  case (version_type) {
	  # correct linux to gnu/linux during the next big refactor
	  darwin|linux|osf|windows|none {
	    func_arith $number_major + $number_minor
	    setvar current = "$func_arith_result"
	    setvar age = "$number_minor"
	    setvar revision = "$number_revision"
	    }
	  freebsd-aout|freebsd-elf|qnx|sunos {
	    setvar current = "$number_major"
	    setvar revision = "$number_minor"
	    setvar age = ""0""
	    }
	  irix|nonstopux {
	    func_arith $number_major + $number_minor
	    setvar current = "$func_arith_result"
	    setvar age = "$number_minor"
	    setvar revision = "$number_minor"
	    setvar lt_irix_increment = 'no'
	    }
	  * {
	    func_fatal_configuration "$modename: unknown library version type \`$version_type'"
	    }
	  }
	  }
	no {
	  setvar current = "$1"
	  setvar revision = "$2"
	  setvar age = "$3"
	  }
	}

	# Check that each of the things are valid numbers.
	case (current) {
	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9] { }
	* {
	  func_error "CURRENT \`$current' must be a nonnegative integer"
	  func_fatal_error "\`$vinfo' is not valid version information"
	  }
	}

	case (revision) {
	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9] { }
	* {
	  func_error "REVISION \`$revision' must be a nonnegative integer"
	  func_fatal_error "\`$vinfo' is not valid version information"
	  }
	}

	case (age) {
	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9] { }
	* {
	  func_error "AGE \`$age' must be a nonnegative integer"
	  func_fatal_error "\`$vinfo' is not valid version information"
	  }
	}

	if test $age -gt $current {
	  func_error "AGE \`$age' is greater than the current interface number \`$current'"
	  func_fatal_error "\`$vinfo' is not valid version information"
	}

	# Calculate the version variables.
	setvar major = ''
	setvar versuffix = ''
	setvar verstring = ''
	case (version_type) {
	none { }

	darwin {
	  # Like Linux, but with the current version available in
	  # verstring for coding it into the library header
	  func_arith $current - $age
	  setvar major = ".$func_arith_result"
	  setvar versuffix = ""$major.$age.$revision""
	  # Darwin ld doesn't like 0 for these options...
	  func_arith $current + 1
	  setvar minor_current = "$func_arith_result"
	  setvar xlcverstring = ""${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision""
	  setvar verstring = ""-compatibility_version $minor_current -current_version $minor_current.$revision""
	  }

	freebsd-aout {
	  setvar major = "".$current""
	  setvar versuffix = "".$current.$revision"";
	  }

	freebsd-elf {
	  setvar major = "".$current""
	  setvar versuffix = "".$current""
	  }

	irix | nonstopux {
	  if test "X$lt_irix_increment" = "Xno" {
	    func_arith $current - $age
	  } else {
	    func_arith $current - $age + 1
	  }
	  setvar major = "$func_arith_result"

	  case (version_type) {
	    nonstopux { setvar verstring_prefix = 'nonstopux' }
	    * {         setvar verstring_prefix = 'sgi' }
	  }
	  setvar verstring = ""$verstring_prefix$major.$revision""

	  # Add in all the interfaces that we are compatible with.
	  setvar loop = "$revision"
	  while test $loop -ne 0 {
	    func_arith $revision - $loop
	    setvar iface = "$func_arith_result"
	    func_arith $loop - 1
	    setvar loop = "$func_arith_result"
	    setvar verstring = ""$verstring_prefix$major.$iface:$verstring""
	  }

	  # Before this point, $major must not contain `.'.
	  setvar major = ".$major"
	  setvar versuffix = ""$major.$revision""
	  }

	linux { # correct to gnu/linux during the next big refactor
	  func_arith $current - $age
	  setvar major = ".$func_arith_result"
	  setvar versuffix = ""$major.$age.$revision""
	  }

	osf {
	  func_arith $current - $age
	  setvar major = ".$func_arith_result"
	  setvar versuffix = "".$current.$age.$revision""
	  setvar verstring = ""$current.$age.$revision""

	  # Add in all the interfaces that we are compatible with.
	  setvar loop = "$age"
	  while test $loop -ne 0 {
	    func_arith $current - $loop
	    setvar iface = "$func_arith_result"
	    func_arith $loop - 1
	    setvar loop = "$func_arith_result"
	    setvar verstring = ""$verstring:${iface}.0""
	  }

	  # Make executables depend on our current version.
	  func_append verstring ":${current}.0"
	  }

	qnx {
	  setvar major = "".$current""
	  setvar versuffix = "".$current""
	  }

	sunos {
	  setvar major = "".$current""
	  setvar versuffix = "".$current.$revision""
	  }

	windows {
	  # Use '-' rather than '.', since we only want one
	  # extension on DOS 8.3 filesystems.
	  func_arith $current - $age
	  setvar major = "$func_arith_result"
	  setvar versuffix = ""-$major""
	  }

	* {
	  func_fatal_configuration "unknown library version type \`$version_type'"
	  }
	}

	# Clear the version info if we defaulted, and they specified a release.
	if test -z $vinfo && test -n $release {
	  setvar major = ''
	  case (version_type) {
	  darwin {
	    # we can't check for "0.0" in archive_cmds due to quoting
	    # problems, so we reset it completely
	    setvar verstring = ''
	    }
	  * {
	    setvar verstring = ""0.0""
	    }
	  }
	  if test $need_version = no {
	    setvar versuffix = ''
	  } else {
	    setvar versuffix = "".0.0""
	  }
	}

	# Remove version info from name if versioning should be avoided
	if test $avoid_version = yes && test $need_version = no {
	  setvar major = ''
	  setvar versuffix = ''
	  setvar verstring = """"
	}

	# Check to see if the archive will have undefined symbols.
	if test $allow_undefined = yes {
	  if test $allow_undefined_flag = unsupported {
	    func_warning "undefined symbols not allowed in $host shared libraries"
	    setvar build_libtool_libs = 'no'
	    setvar build_old_libs = 'yes'
	  }
	} else {
	  # Don't allow undefined symbols.
	  setvar allow_undefined_flag = "$no_undefined_flag"
	}

      }

      func_generate_dlsyms $libname $libname "yes"
      func_append libobjs " $symfileobj"
      test "X$libobjs" = "X " && setvar libobjs = ''

      if test $opt_mode != relink {
	# Remove our outputs, but don't remove object files since they
	# may have been created when compiling PIC objects.
	setvar removelist = ''
	setvar tempremovelist = $($ECHO "$output_objdir/*)
	for p in $tempremovelist {
	  case (p) {
	    *.$objext | *.gcno {
	       }
	    $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.* {
	       if test "X$precious_files_regex" != "X" {
		 if $ECHO $p | $EGREP -e $precious_files_regex >/dev/null 2>&1
		 {
		   continue
		 }
	       }
	       func_append removelist " $p"
	       }
	    * { }
	  }
	}
	test -n $removelist && \
	  func_show_eval "${RM}r \$removelist"
      }

      # Now set the variables for building old libraries.
      if test $build_old_libs = yes && test $build_libtool_libs != convenience  {
	func_append oldlibs " $output_objdir/$libname.$libext"

	# Transform .lo files to .o files.
	setvar oldobjs = ""$objs "$($ECHO $libobjs | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP)"
      }

      # Eliminate all temporary directories.
      #for path in $notinst_path; do
      #	lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"`
      #	deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"`
      #	dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"`
      #done

      if test -n $xrpath {
	# If the user specified any rpath flags, then add them.
	setvar temp_xrpath = ''
	for libdir in $xrpath {
	  func_replace_sysroot $libdir
	  func_append temp_xrpath " -R$func_replace_sysroot_result"
	  case{
	  *" $libdir "* { }
	  * { func_append finalize_rpath " $libdir" }
	  }
	}
	if test $hardcode_into_libs != yes || test $build_old_libs = yes {
	  setvar dependency_libs = ""$temp_xrpath $dependency_libs""
	}
      }

      # Make sure dlfiles contains only unique files that won't be dlpreopened
      setvar old_dlfiles = "$dlfiles"
      setvar dlfiles = ''
      for lib in $old_dlfiles {
	case{
	*" $lib "* { }
	* { func_append dlfiles " $lib" }
	}
      }

      # Make sure dlprefiles contains only unique files
      setvar old_dlprefiles = "$dlprefiles"
      setvar dlprefiles = ''
      for lib in $old_dlprefiles {
	case{
	*" $lib "* { }
	* { func_append dlprefiles " $lib" }
	}
      }

      if test $build_libtool_libs = yes {
	if test -n $rpath {
	  case (host) {
	  *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku* {
	    # these systems don't actually have a c library (as such)!
	    }
	  *-*-rhapsody* | *-*-darwin1.[012] {
	    # Rhapsody C library is in the System framework
	    func_append deplibs " System.ltframework"
	    }
	  *-*-netbsd* {
	    # Don't link with libc until the a.out ld.so is fixed.
	    }
	  *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* {
	    # Do not include libc due to us having libc/libc_r.
	    }
	  *-*-sco3.2v5* | *-*-sco5v6* {
	    # Causes problems with __ctype
	    }
	  *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX* {
	    # Compiler inserts libc in the correct place for threads to work
	    }
	  * {
	    # Add libc to deplibs on all other systems if necessary.
	    if test $build_libtool_need_lc = "yes" {
	      func_append deplibs " -lc"
	    }
	    }
	  }
	}

	# Transform deplibs into only deplibs that can be linked in shared.
	setvar name_save = "$name"
	setvar libname_save = "$libname"
	setvar release_save = "$release"
	setvar versuffix_save = "$versuffix"
	setvar major_save = "$major"
	# I'm not sure if I'm treating the release correctly.  I think
	# release should show up in the -l (ie -lgmp5) so we don't want to
	# add it in twice.  Is that correct?
	setvar release = """"
	setvar versuffix = """"
	setvar major = """"
	setvar newdeplibs = ''
	setvar droppeddeps = 'no'
	case (deplibs_check_method) {
	pass_all {
	  # Don't check for shared/static.  Everything works.
	  # This might be a little naive.  We might want to check
	  # whether the library exists or not.  But this is on
	  # osf3 & osf4 and I'm not really sure... Just
	  # implementing what was already the behavior.
	  setvar newdeplibs = "$deplibs"
	  }
	test_compile {
	  # This code stresses the "libraries are programs" paradigm to its
	  # limits. Maybe even breaks it.  We compile a program, linking it
	  # against the deplibs as a proxy for the library.  Then we can check
	  # whether they linked in statically or dynamically with ldd.
	  $opt_dry_run || $RM conftest.c
	  cat > conftest.c <<< """
	  int main() { return 0; }
"""
	  $opt_dry_run || $RM conftest
	  if $LTCC $LTCFLAGS -o conftest conftest.c $deplibs {
	    setvar ldd_output = $(ldd conftest)
	    for i in $deplibs {
	      case (i) {
	      -l* {
		func_stripname -l '' $i
		setvar name = "$func_stripname_result"
		if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes"  {
		  case{
		  *" $i "* {
		    func_append newdeplibs " $i"
		    setvar i = """"
		    }
		  }
		}
		if test -n $i  {
		  setvar libname = $(eval "\\$ECHO \"$libname_spec)
		  setvar deplib_matches = $(eval "\\$ECHO \"$library_names_spec)
		  set dummy $deplib_matches; shift
		  setvar deplib_match = "$1"
		  if test $(expr $ldd_output : ".*$deplib_match) -ne 0  {
		    func_append newdeplibs " $i"
		  } else {
		    setvar droppeddeps = 'yes'
		    echo
		    $ECHO "*** Warning: dynamic linker does not accept needed library $i."
		    echo "*** I have the capability to make that library automatically link in when"
		    echo "*** you link to this library.  But I can only do this if you have a"
		    echo "*** shared version of the library, which I believe you do not have"
		    echo "*** because a test_compile did reveal that the linker did not use it for"
		    echo "*** its dynamic dependency list that programs get resolved with at runtime."
		  }
		}
		}
	      * {
		func_append newdeplibs " $i"
		}
	      }
	    }
	  } else {
	    # Error occurred in the first compile.  Let's try to salvage
	    # the situation: Compile a separate program for each library.
	    for i in $deplibs {
	      case (i) {
	      -l* {
		func_stripname -l '' $i
		setvar name = "$func_stripname_result"
		$opt_dry_run || $RM conftest
		if $LTCC $LTCFLAGS -o conftest conftest.c $i {
		  setvar ldd_output = $(ldd conftest)
		  if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes"  {
		    case{
		    *" $i "* {
		      func_append newdeplibs " $i"
		      setvar i = """"
		      }
		    }
		  }
		  if test -n $i  {
		    setvar libname = $(eval "\\$ECHO \"$libname_spec)
		    setvar deplib_matches = $(eval "\\$ECHO \"$library_names_spec)
		    set dummy $deplib_matches; shift
		    setvar deplib_match = "$1"
		    if test $(expr $ldd_output : ".*$deplib_match) -ne 0  {
		      func_append newdeplibs " $i"
		    } else {
		      setvar droppeddeps = 'yes'
		      echo
		      $ECHO "*** Warning: dynamic linker does not accept needed library $i."
		      echo "*** I have the capability to make that library automatically link in when"
		      echo "*** you link to this library.  But I can only do this if you have a"
		      echo "*** shared version of the library, which you do not appear to have"
		      echo "*** because a test_compile did reveal that the linker did not use this one"
		      echo "*** as a dynamic dependency that programs can get resolved with at runtime."
		    }
		  }
		} else {
		  setvar droppeddeps = 'yes'
		  echo
		  $ECHO "*** Warning!  Library $i is needed by this library but I was not able to"
		  echo "*** make it link in!  You will probably need to install it or some"
		  echo "*** library that it depends on before this library will be fully"
		  echo "*** functional.  Installing it before continuing would be even better."
		}
		}
	      * {
		func_append newdeplibs " $i"
		}
	      }
	    }
	  }
	  }
	file_magic* {
	  set dummy $deplibs_check_method; shift
	  setvar file_magic_regex = $(expr $deplibs_check_method : "$1 \(.*\))
	  for a_deplib in $deplibs {
	    case (a_deplib) {
	    -l* {
	      func_stripname -l '' $a_deplib
	      setvar name = "$func_stripname_result"
	      if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes"  {
		case{
		*" $a_deplib "* {
		  func_append newdeplibs " $a_deplib"
		  setvar a_deplib = """"
		  }
		}
	      }
	      if test -n $a_deplib  {
		setvar libname = $(eval "\\$ECHO \"$libname_spec)
		if test -n $file_magic_glob {
		  setvar libnameglob = $(func_echo_all $libname | $SED -e $file_magic_glob)
		} else {
		  setvar libnameglob = "$libname"
		}
		test $want_nocaseglob = yes && setvar nocaseglob = $(shopt -p nocaseglob)
		for i in $lib_search_path $sys_lib_search_path $shlib_search_path {
		  if test $want_nocaseglob = yes {
		    shopt -s nocaseglob
		    setvar potential_libs = $(ls $i/$libnameglob[.-]* )
		    $nocaseglob
		  } else {
		    setvar potential_libs = $(ls $i/$libnameglob[.-]* )
		  }
		  for potent_lib in $potential_libs {
		      # Follow soft links.
		      if ls -lLd $potent_lib 2>/dev/null |
			 $GREP " -> " >/dev/null {
			continue
		      }
		      # The statement above tries to avoid entering an
		      # endless loop below, in case of cyclic links.
		      # We might still enter an endless loop, since a link
		      # loop can be closed while we follow links,
		      # but so what?
		      setvar potlib = "$potent_lib"
		      while test -h $potlib  {
			setvar potliblink = $(ls -ld $potlib | ${SED} 's/.* -> //)
			case (potliblink) {
			[\\/]* | [A-Za-z]:[\\/]* { setvar potlib = "$potliblink"}
			* { setvar potlib = "$($ECHO $potlib | $SED 's,[^/]*$,,)"$potliblink""}
			}
		      }
		      if eval $file_magic_cmd '"''$'potlib'"' 2>/dev/null |
			 $SED -e 10q |
			 $EGREP $file_magic_regex > /dev/null {
			func_append newdeplibs " $a_deplib"
			setvar a_deplib = """"
			break 2
		      }
		  }
		}
	      }
	      if test -n $a_deplib  {
		setvar droppeddeps = 'yes'
		echo
		$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
		echo "*** I have the capability to make that library automatically link in when"
		echo "*** you link to this library.  But I can only do this if you have a"
		echo "*** shared version of the library, which you do not appear to have"
		echo "*** because I did check the linker path looking for a file starting"
		if test -z $potlib  {
		  $ECHO "*** with $libname but no candidates were found. (...for file magic test)"
		} else {
		  $ECHO "*** with $libname and none of the candidates passed a file format test"
		  $ECHO "*** using a file magic. Last file checked: $potlib"
		}
	      }
	      }
	    * {
	      # Add a -L argument.
	      func_append newdeplibs " $a_deplib"
	      }
	    }
	  } # Gone through all deplibs.
	  }
	match_pattern* {
	  set dummy $deplibs_check_method; shift
	  setvar match_pattern_regex = $(expr $deplibs_check_method : "$1 \(.*\))
	  for a_deplib in $deplibs {
	    case (a_deplib) {
	    -l* {
	      func_stripname -l '' $a_deplib
	      setvar name = "$func_stripname_result"
	      if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes"  {
		case{
		*" $a_deplib "* {
		  func_append newdeplibs " $a_deplib"
		  setvar a_deplib = """"
		  }
		}
	      }
	      if test -n $a_deplib  {
		setvar libname = $(eval "\\$ECHO \"$libname_spec)
		for i in $lib_search_path $sys_lib_search_path $shlib_search_path {
		  setvar potential_libs = $(ls $i/$libname[.-]* )
		  for potent_lib in $potential_libs {
		    setvar potlib = "$potent_lib" # see symlink-check above in file_magic test
		    if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \
		       $EGREP $match_pattern_regex > /dev/null {
		      func_append newdeplibs " $a_deplib"
		      setvar a_deplib = """"
		      break 2
		    }
		  }
		}
	      }
	      if test -n $a_deplib  {
		setvar droppeddeps = 'yes'
		echo
		$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
		echo "*** I have the capability to make that library automatically link in when"
		echo "*** you link to this library.  But I can only do this if you have a"
		echo "*** shared version of the library, which you do not appear to have"
		echo "*** because I did check the linker path looking for a file starting"
		if test -z $potlib  {
		  $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)"
		} else {
		  $ECHO "*** with $libname and none of the candidates passed a file format test"
		  $ECHO "*** using a regex pattern. Last file checked: $potlib"
		}
	      }
	      }
	    * {
	      # Add a -L argument.
	      func_append newdeplibs " $a_deplib"
	      }
	    }
	  } # Gone through all deplibs.
	  }
	none | unknown | * {
	  setvar newdeplibs = """"
	  setvar tmp_deplibs = $($ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g)
	  if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes"  {
	    for i in $predeps $postdeps  {
	      # can't use Xsed below, because $i might contain '/'
	      setvar tmp_deplibs = $($ECHO " $tmp_deplibs" | $SED "s,$i,,)
	    }
	  }
	  case (tmp_deplibs) {
	  *[!\	\ ]* {
	    echo
	    if test "X$deplibs_check_method" = "Xnone" {
	      echo "*** Warning: inter-library dependencies are not supported in this platform."
	    } else {
	      echo "*** Warning: inter-library dependencies are not known to be supported."
	    }
	    echo "*** All declared inter-library dependencies are being dropped."
	    setvar droppeddeps = 'yes'
	    }
	  }
	  }
	}
	setvar versuffix = "$versuffix_save"
	setvar major = "$major_save"
	setvar release = "$release_save"
	setvar libname = "$libname_save"
	setvar name = "$name_save"

	case (host) {
	*-*-rhapsody* | *-*-darwin1.[012] {
	  # On Rhapsody replace the C library with the System framework
	  setvar newdeplibs = $($ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /)
	  }
	}

	if test $droppeddeps = yes {
	  if test $module = yes {
	    echo
	    echo "*** Warning: libtool could not satisfy all declared inter-library"
	    $ECHO "*** dependencies of module $libname.  Therefore, libtool will create"
	    echo "*** a static module, that should work as long as the dlopening"
	    echo "*** application is linked with the -dlopen flag."
	    if test -z $global_symbol_pipe {
	      echo
	      echo "*** However, this would only work if libtool was able to extract symbol"
	      echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
	      echo "*** not find such a program.  So, this module is probably useless."
	      echo "*** \`nm' from GNU binutils and a full rebuild may help."
	    }
	    if test $build_old_libs = no {
	      setvar oldlibs = ""$output_objdir/$libname.$libext""
	      setvar build_libtool_libs = 'module'
	      setvar build_old_libs = 'yes'
	    } else {
	      setvar build_libtool_libs = 'no'
	    }
	  } else {
	    echo "*** The inter-library dependencies that have been dropped here will be"
	    echo "*** automatically added whenever a program is linked with this library"
	    echo "*** or is declared to -dlopen it."

	    if test $allow_undefined = no {
	      echo
	      echo "*** Since this library must not contain undefined symbols,"
	      echo "*** because either the platform does not support them or"
	      echo "*** it was explicitly requested with -no-undefined,"
	      echo "*** libtool will only create a static version of it."
	      if test $build_old_libs = no {
		setvar oldlibs = ""$output_objdir/$libname.$libext""
		setvar build_libtool_libs = 'module'
		setvar build_old_libs = 'yes'
	      } else {
		setvar build_libtool_libs = 'no'
	      }
	    }
	  }
	}
	# Done checking deplibs!
	setvar deplibs = "$newdeplibs"
      }
      # Time to change all our "foo.ltframework" stuff back to "-framework foo"
      case (host) {
	*-*-darwin* {
	  setvar newdeplibs = $($ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g)
	  setvar new_inherited_linker_flags = $($ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g)
	  setvar deplibs = $($ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g)
	  }
      }

      # move library search paths that coincide with paths to not yet
      # installed libraries to the beginning of the library search list
      setvar new_libs = ''
      for path in $notinst_path {
	case{
	*" -L$path/$objdir "* { }
	* {
	  case{
	  *" -L$path/$objdir "* {
	    func_append new_libs " -L$path/$objdir" }
	  }
	  }
	}
      }
      for deplib in $deplibs {
	case (deplib) {
	-L* {
	  case{
	  *" $deplib "* { }
	  * { func_append new_libs " $deplib" }
	  }
	  }
	* { func_append new_libs " $deplib" }
	}
      }
      setvar deplibs = "$new_libs"

      # All the library-specific variables (install_libdir is set above).
      setvar library_names = ''
      setvar old_library = ''
      setvar dlname = ''

      # Test again, we may have decided not to build it any more
      if test $build_libtool_libs = yes {
	# Remove ${wl} instances when linking with ld.
	# FIXME: should test the right _cmds variable.
	case (archive_cmds) {
	  *\$LD\ * { setvar wl = '' }
        }
	if test $hardcode_into_libs = yes {
	  # Hardcode the library paths
	  setvar hardcode_libdirs = ''
	  setvar dep_rpath = ''
	  setvar rpath = "$finalize_rpath"
	  test $opt_mode != relink && setvar rpath = ""$compile_rpath$rpath""
	  for libdir in $rpath {
	    if test -n $hardcode_libdir_flag_spec {
	      if test -n $hardcode_libdir_separator {
		func_replace_sysroot $libdir
		setvar libdir = "$func_replace_sysroot_result"
		if test -z $hardcode_libdirs {
		  setvar hardcode_libdirs = "$libdir"
		} else {
		  # Just accumulate the unique libdirs.
		  case (hardcode_libdir_separator) {
		  *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"* {
		    }
		  * {
		    func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
		    }
		  }
		}
	      } else {
		eval flag='"'$hardcode_libdir_flag_spec'"'
		func_append dep_rpath " $flag"
	      }
	    } elif test -n $runpath_var {
	      case{
	      *" $libdir "* { }
	      * { func_append perm_rpath " $libdir" }
	      }
	    }
	  }
	  # Substitute the hardcoded libdirs into the rpath.
	  if test -n $hardcode_libdir_separator &&
	     test -n $hardcode_libdirs {
	    setvar libdir = "$hardcode_libdirs"
	    eval "dep_rpath=\"$hardcode_libdir_flag_spec\""
	  }
	  if test -n $runpath_var && test -n $perm_rpath {
	    # We should set the runpath_var.
	    setvar rpath = ''
	    for dir in $perm_rpath {
	      func_append rpath "$dir:"
	    }
	    eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
	  }
	  test -n $dep_rpath && setvar deplibs = ""$dep_rpath $deplibs""
	}

	setvar shlibpath = "$finalize_shlibpath"
	test $opt_mode != relink && setvar shlibpath = ""$compile_shlibpath$shlibpath""
	if test -n $shlibpath {
	  eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
	}

	# Get the real and link names of the library.
	eval shared_ext='"'$shrext_cmds'"'
	eval library_names='"'$library_names_spec'"'
	set dummy $library_names
	shift
	setvar realname = "$1"
	shift

	if test -n $soname_spec {
	  eval soname='"'$soname_spec'"'
	} else {
	  setvar soname = "$realname"
	}
	if test -z $dlname {
	  setvar dlname = "$soname"
	}

	setvar lib = ""$output_objdir/$realname""
	setvar linknames = ''for link in @ARGV {
	  func_append linknames " $link"
	}

	# Use standard objects if they are pic
	test -z $pic_flag && setvar libobjs = $($ECHO $libobjs | $SP2NL | $SED $lo2o | $NL2SP)
	test "X$libobjs" = "X " && setvar libobjs = ''

	setvar delfiles = ''
	if test -n $export_symbols && test -n $include_expsyms {
	  $opt_dry_run || cp $export_symbols "$output_objdir/$libname.uexp"
	  setvar export_symbols = ""$output_objdir/$libname.uexp""
	  func_append delfiles " $export_symbols"
	}

	setvar orig_export_symbols = ''
	case (host_os) {
	cygwin* | mingw* | cegcc* {
	  if test -n $export_symbols && test -z $export_symbols_regex {
	    # exporting using user supplied symfile
	    if test "x$($SED 1q $export_symbols)" != xEXPORTS {
	      # and it's NOT already a .def file. Must figure out
	      # which of the given symbols are data symbols and tag
	      # them as such. So, trigger use of export_symbols_cmds.
	      # export_symbols gets reassigned inside the "prepare
	      # the list of exported symbols" if statement, so the
	      # include_expsyms logic still works.
	      setvar orig_export_symbols = "$export_symbols"
	      setvar export_symbols = ''
	      setvar always_export_symbols = 'yes'
	    }
	  }
	  }
	}

	# Prepare the list of exported symbols
	if test -z $export_symbols {
	  if test $always_export_symbols = yes || test -n $export_symbols_regex {
	    func_verbose "generating symbol list for \`$libname.la'"
	    setvar export_symbols = ""$output_objdir/$libname.exp""
	    $opt_dry_run || $RM $export_symbols
	    setvar cmds = "$export_symbols_cmds"
	    setvar save_ifs = "$IFS"; setvar IFS = ''~''
	    for cmd1 in $cmds {
	      setvar IFS = "$save_ifs"
	      # Take the normal branch if the nm_file_list_spec branch
	      # doesn't work or if tool conversion is not needed.
	      case (nm_file_list_spec) {
		*~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~* {
		  setvar try_normal_branch = 'yes'
		  eval cmd='"'$cmd1'"'
		  func_len " $cmd"
		  setvar len = "$func_len_result"
		  }
		* {
		  setvar try_normal_branch = 'no'
		  }
	      }
	      if test $try_normal_branch = yes \
		 && do { test $len -lt $max_cmd_len \
		      || test $max_cmd_len -le -1; }
	      {
		func_show_eval $cmd 'exit $?'
		setvar skipped_export = 'false'
	      } elif test -n $nm_file_list_spec {
		func_basename $output
		setvar output_la = "$func_basename_result"
		setvar save_libobjs = "$libobjs"
		setvar save_output = "$output"
		setvar output = "${output_objdir}/${output_la}.nm"
		func_to_tool_file $output
		setvar libobjs = "$nm_file_list_spec$func_to_tool_file_result"
		func_append delfiles " $output"
		func_verbose "creating $NM input file list: $output"
		for obj in $save_libobjs {
		  func_to_tool_file $obj
		  $ECHO $func_to_tool_file_result
		} > "$output"
		eval cmd='"'$cmd1'"'
		func_show_eval $cmd 'exit $?'
		setvar output = "$save_output"
		setvar libobjs = "$save_libobjs"
		setvar skipped_export = 'false'
	      } else {
		# The command line is too long to execute in one step.
		func_verbose "using reloadable object file for export list..."
		setvar skipped_export = ':'
		# Break out early, otherwise skipped_export may be
		# set to false by a later but shorter cmd.
		break
	      }
	    }
	    setvar IFS = "$save_ifs"
	    if test -n $export_symbols_regex && test "X$skipped_export" != "X:" {
	      func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
	      func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
	    }
	  }
	}

	if test -n $export_symbols && test -n $include_expsyms {
	  setvar tmp_export_symbols = "$export_symbols"
	  test -n $orig_export_symbols && setvar tmp_export_symbols = "$orig_export_symbols"
	  $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
	}

	if test "X$skipped_export" != "X:" && test -n $orig_export_symbols {
	  # The given exports_symbols file has to be filtered, so filter it.
	  func_verbose "filter symbol list for \`$libname.la' to tag DATA exports"
	  # FIXME: $output_objdir/$libname.filter potentially contains lots of
	  # 's' commands which not all seds can handle. GNU sed should be fine
	  # though. Also, the filter scales superlinearly with the number of
	  # global variables. join(1) would be nice here, but unfortunately
	  # isn't a blessed tool.
	  $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
	  func_append delfiles " $export_symbols $output_objdir/$libname.filter"
	  setvar export_symbols = "$output_objdir/$libname.def"
	  $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
	}

	setvar tmp_deplibs = ''
	for test_deplib in $deplibs {
	  case{
	  *" $test_deplib "* { }
	  * {
	    func_append tmp_deplibs " $test_deplib"
	    }
	  }
	}
	setvar deplibs = "$tmp_deplibs"

	if test -n $convenience {
	  if test -n $whole_archive_flag_spec &&
	    test $compiler_needs_object = yes &&
	    test -z $libobjs {
	    # extract the archives, so we have objects to list.
	    # TODO: could optimize this to just extract one archive.
	    setvar whole_archive_flag_spec = ''
	  }
	  if test -n $whole_archive_flag_spec {
	    setvar save_libobjs = "$libobjs"
	    eval libobjs='"''$'libobjs $whole_archive_flag_spec'"'
	    test "X$libobjs" = "X " && setvar libobjs = ''
	  } else {
	    setvar gentop = ""$output_objdir/${outputname}x""
	    func_append generated " $gentop"

	    func_extract_archives $gentop $convenience
	    func_append libobjs " $func_extract_archives_result"
	    test "X$libobjs" = "X " && setvar libobjs = ''
	  }
	}

	if test $thread_safe = yes && test -n $thread_safe_flag_spec {
	  eval flag='"'$thread_safe_flag_spec'"'
	  func_append linker_flags " $flag"
	}

	# Make a backup of the uninstalled library when relinking
	if test $opt_mode = relink {
	  $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $?
	}

	# Do each of the archive commands.
	if test $module = yes && test -n $module_cmds  {
	  if test -n $export_symbols && test -n $module_expsym_cmds {
	    eval test_cmds='"'$module_expsym_cmds'"'
	    setvar cmds = "$module_expsym_cmds"
	  } else {
	    eval test_cmds='"'$module_cmds'"'
	    setvar cmds = "$module_cmds"
	  }
	} else {
	  if test -n $export_symbols && test -n $archive_expsym_cmds {
	    eval test_cmds='"'$archive_expsym_cmds'"'
	    setvar cmds = "$archive_expsym_cmds"
	  } else {
	    eval test_cmds='"'$archive_cmds'"'
	    setvar cmds = "$archive_cmds"
	  }
	}

	if test "X$skipped_export" != "X:" &&
	   func_len " $test_cmds" &&
	   setvar len = "$func_len_result" &&
	   test $len -lt $max_cmd_len || test $max_cmd_len -le -1 {
	  :
	} else {
	  # The command line is too long to link in one step, link piecewise
	  # or, if using GNU ld and skipped_export is not :, use a linker
	  # script.

	  # Save the value of $output and $libobjs because we want to
	  # use them later.  If we have whole_archive_flag_spec, we
	  # want to use save_libobjs as it was before
	  # whole_archive_flag_spec was expanded, because we can't
	  # assume the linker understands whole_archive_flag_spec.
	  # This may have to be revisited, in case too many
	  # convenience libraries get linked in and end up exceeding
	  # the spec.
	  if test -z $convenience || test -z $whole_archive_flag_spec {
	    setvar save_libobjs = "$libobjs"
	  }
	  setvar save_output = "$output"
	  func_basename $output
	  setvar output_la = "$func_basename_result"

	  # Clear the reloadable object creation command queue and
	  # initialize k to one.
	  setvar test_cmds = ''
	  setvar concat_cmds = ''
	  setvar objlist = ''
	  setvar last_robj = ''
	  setvar k = '1'

	  if test -n $save_libobjs && test "X$skipped_export" != "X:" && test $with_gnu_ld = yes {
	    setvar output = "${output_objdir}/${output_la}.lnkscript"
	    func_verbose "creating GNU ld script: $output"
	    echo 'INPUT (' > $output
	    for obj in $save_libobjs
	    {
	      func_to_tool_file $obj
	      $ECHO $func_to_tool_file_result >> $output
	    }
	    echo ')' >> $output
	    func_append delfiles " $output"
	    func_to_tool_file $output
	    setvar output = "$func_to_tool_file_result"
	  } elif test -n $save_libobjs && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X {
	    setvar output = "${output_objdir}/${output_la}.lnk"
	    func_verbose "creating linker input file list: $output"
	    : > $output
	    set x $save_libobjs
	    shift
	    setvar firstobj = ''
	    if test $compiler_needs_object = yes {
	      setvar firstobj = ""$1 ""
	      shift
	    }for obj in @ARGV {
	      func_to_tool_file $obj
	      $ECHO $func_to_tool_file_result >> $output
	    }
	    func_append delfiles " $output"
	    func_to_tool_file $output
	    setvar output = "$firstobj'"'$file_list_spec$func_to_tool_file_result'"'"
	  } else {
	    if test -n $save_libobjs {
	      func_verbose "creating reloadable object files..."
	      setvar output = "$output_objdir/$output_la-${k}.$objext"
	      eval test_cmds='"'$reload_cmds'"'
	      func_len " $test_cmds"
	      setvar len0 = "$func_len_result"
	      setvar len = "$len0"

	      # Loop over the list of objects to be linked.
	      for obj in $save_libobjs
	      {
		func_len " $obj"
		func_arith $len + $func_len_result
		setvar len = "$func_arith_result"
		if test "X$objlist" = X ||
		   test $len -lt $max_cmd_len {
		  func_append objlist " $obj"
		} else {
		  # The command $test_cmds is almost too long, add a
		  # command to the queue.
		  if test $k -eq 1  {
		    # The first file doesn't have a previous command to add.
		    setvar reload_objs = "$objlist"
		    eval concat_cmds='"'$reload_cmds'"'
		  } else {
		    # All subsequent reloadable object files will link in
		    # the last one created.
		    setvar reload_objs = ""$objlist $last_robj""
		    eval concat_cmds='"''$'concat_cmds~$reload_cmds~'$'RM $last_robj'"'
		  }
		  setvar last_robj = "$output_objdir/$output_la-${k}.$objext"
		  func_arith $k + 1
		  setvar k = "$func_arith_result"
		  setvar output = "$output_objdir/$output_la-${k}.$objext"
		  setvar objlist = "" $obj""
		  func_len " $last_robj"
		  func_arith $len0 + $func_len_result
		  setvar len = "$func_arith_result"
		}
	      }
	      # Handle the remaining objects by creating one last
	      # reloadable object file.  All subsequent reloadable object
	      # files will link in the last one created.
	      test -z $concat_cmds || setvar concat_cmds = "$concat_cmds~"
	      setvar reload_objs = ""$objlist $last_robj""
	      eval concat_cmds='"''$'{concat_cmds}$reload_cmds'"'
	      if test -n $last_robj {
	        eval concat_cmds='"''$'{concat_cmds}~'$'RM $last_robj'"'
	      }
	      func_append delfiles " $output"

	    } else {
	      setvar output = ''
	    }

	    if ${skipped_export-false} {
	      func_verbose "generating symbol list for \`$libname.la'"
	      setvar export_symbols = ""$output_objdir/$libname.exp""
	      $opt_dry_run || $RM $export_symbols
	      setvar libobjs = "$output"
	      # Append the command to create the export file.
	      test -z $concat_cmds || setvar concat_cmds = "$concat_cmds~"
	      eval concat_cmds='"''$'concat_cmds$export_symbols_cmds'"'
	      if test -n $last_robj {
		eval concat_cmds='"''$'concat_cmds~'$'RM $last_robj'"'
	      }
	    }

	    test -n $save_libobjs &&
	      func_verbose "creating a temporary reloadable object file: $output"

	    # Loop through the commands generated above and execute them.
	    setvar save_ifs = "$IFS"; setvar IFS = ''~''
	    for cmd in $concat_cmds {
	      setvar IFS = "$save_ifs"
	      $opt_silent || do {
		  func_quote_for_expand $cmd
		  eval "func_echo $func_quote_for_expand_result"
	      }
	      $opt_dry_run || eval $cmd || do {
		setvar lt_exit = ""$?

		# Restore the uninstalled library and exit
		if test $opt_mode = relink {
		  shell { cd $output_objdir && \
		    $RM "${realname}T" && \
		    $MV "${realname}U" $realname }
		}

		exit $lt_exit
	      }
	    }
	    setvar IFS = "$save_ifs"

	    if test -n $export_symbols_regex && ${skipped_export-false} {
	      func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
	      func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
	    }
	  }

          if ${skipped_export-false} {
	    if test -n $export_symbols && test -n $include_expsyms {
	      setvar tmp_export_symbols = "$export_symbols"
	      test -n $orig_export_symbols && setvar tmp_export_symbols = "$orig_export_symbols"
	      $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
	    }

	    if test -n $orig_export_symbols {
	      # The given exports_symbols file has to be filtered, so filter it.
	      func_verbose "filter symbol list for \`$libname.la' to tag DATA exports"
	      # FIXME: $output_objdir/$libname.filter potentially contains lots of
	      # 's' commands which not all seds can handle. GNU sed should be fine
	      # though. Also, the filter scales superlinearly with the number of
	      # global variables. join(1) would be nice here, but unfortunately
	      # isn't a blessed tool.
	      $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
	      func_append delfiles " $export_symbols $output_objdir/$libname.filter"
	      setvar export_symbols = "$output_objdir/$libname.def"
	      $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
	    }
	  }

	  setvar libobjs = "$output"
	  # Restore the value of output.
	  setvar output = "$save_output"

	  if test -n $convenience && test -n $whole_archive_flag_spec {
	    eval libobjs='"''$'libobjs $whole_archive_flag_spec'"'
	    test "X$libobjs" = "X " && setvar libobjs = ''
	  }
	  # Expand the library linking commands again to reset the
	  # value of $libobjs for piecewise linking.

	  # Do each of the archive commands.
	  if test $module = yes && test -n $module_cmds  {
	    if test -n $export_symbols && test -n $module_expsym_cmds {
	      setvar cmds = "$module_expsym_cmds"
	    } else {
	      setvar cmds = "$module_cmds"
	    }
	  } else {
	    if test -n $export_symbols && test -n $archive_expsym_cmds {
	      setvar cmds = "$archive_expsym_cmds"
	    } else {
	      setvar cmds = "$archive_cmds"
	    }
	  }
	}

	if test -n $delfiles {
	  # Append the command to remove temporary files to $cmds.
	  eval cmds='"''$'cmds~'$'RM $delfiles'"'
	}

	# Add any objects from preloaded convenience libraries
	if test -n $dlprefiles {
	  setvar gentop = ""$output_objdir/${outputname}x""
	  func_append generated " $gentop"

	  func_extract_archives $gentop $dlprefiles
	  func_append libobjs " $func_extract_archives_result"
	  test "X$libobjs" = "X " && setvar libobjs = ''
	}

	setvar save_ifs = "$IFS"; setvar IFS = ''~''
	for cmd in $cmds {
	  setvar IFS = "$save_ifs"
	  eval cmd='"'$cmd'"'
	  $opt_silent || do {
	    func_quote_for_expand $cmd
	    eval "func_echo $func_quote_for_expand_result"
	  }
	  $opt_dry_run || eval $cmd || do {
	    setvar lt_exit = ""$?

	    # Restore the uninstalled library and exit
	    if test $opt_mode = relink {
	      shell { cd $output_objdir && \
	        $RM "${realname}T" && \
		$MV "${realname}U" $realname }
	    }

	    exit $lt_exit
	  }
	}
	setvar IFS = "$save_ifs"

	# Restore the uninstalled library and exit
	if test $opt_mode = relink {
	  $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $?

	  if test -n $convenience {
	    if test -z $whole_archive_flag_spec {
	      func_show_eval '${RM}r "$gentop"'
	    }
	  }

	  exit $EXIT_SUCCESS
	}

	# Create links to the real library.
	for linkname in $linknames {
	  if test $realname != $linkname {
	    func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?'
	  }
	}

	# If -module or -export-dynamic was specified, set the dlname.
	if test $module = yes || test $export_dynamic = yes {
	  # On all known operating systems, these are identical.
	  setvar dlname = "$soname"
	}
      }
      }

    obj {
      if test -n "$dlfiles$dlprefiles" || test $dlself != no {
	func_warning "\`-dlopen' is ignored for objects"
      }

      case{
      *\ -l* | *\ -L* {
	func_warning "\`-l' and \`-L' are ignored for objects" }
      }

      test -n $rpath && \
	func_warning "\`-rpath' is ignored for objects"

      test -n $xrpath && \
	func_warning "\`-R' is ignored for objects"

      test -n $vinfo && \
	func_warning "\`-version-info' is ignored for objects"

      test -n $release && \
	func_warning "\`-release' is ignored for objects"

      case (output) {
      *.lo {
	test -n "$objs$old_deplibs" && \
	  func_fatal_error "cannot build library object \`$output' from non-libtool objects"

	setvar libobj = "$output"
	func_lo2o $libobj
	setvar obj = "$func_lo2o_result"
	}
      * {
	setvar libobj = ''
	setvar obj = "$output"
	}
      }

      # Delete the old objects.
      $opt_dry_run || $RM $obj $libobj

      # Objects from convenience libraries.  This assumes
      # single-version convenience libraries.  Whenever we create
      # different ones for PIC/non-PIC, this we'll have to duplicate
      # the extraction.
      setvar reload_conv_objs = ''
      setvar gentop = ''
      # reload_cmds runs $LD directly, so let us get rid of
      # -Wl from whole_archive_flag_spec and hope we can get by with
      # turning comma into space..
      setvar wl = ''

      if test -n $convenience {
	if test -n $whole_archive_flag_spec {
	  eval tmp_whole_archive_flags='"'$whole_archive_flag_spec'"'
	  setvar reload_conv_objs = "$reload_objs' '$($ECHO $tmp_whole_archive_flags | $SED 's|,| |g)"
	} else {
	  setvar gentop = ""$output_objdir/${obj}x""
	  func_append generated " $gentop"

	  func_extract_archives $gentop $convenience
	  setvar reload_conv_objs = ""$reload_objs $func_extract_archives_result""
	}
      }

      # If we're not building shared, we need to use non_pic_objs
      test $build_libtool_libs != yes && setvar libobjs = "$non_pic_objects"

      # Create the old-style object.
      setvar reload_objs = ""$objs$old_deplibs "$($ECHO $libobjs | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP)" $reload_conv_objs"" ### testsuite: skip nested quoting test

      setvar output = "$obj"
      func_execute_cmds $reload_cmds 'exit $?'

      # Exit if we aren't doing a library object file.
      if test -z $libobj {
	if test -n $gentop {
	  func_show_eval '${RM}r "$gentop"'
	}

	exit $EXIT_SUCCESS
      }

      if test $build_libtool_libs != yes {
	if test -n $gentop {
	  func_show_eval '${RM}r "$gentop"'
	}

	# Create an invalid libtool object if no PIC, so that we don't
	# accidentally link it into a program.
	# $show "echo timestamp > $libobj"
	# $opt_dry_run || eval "echo timestamp > $libobj" || exit $?
	exit $EXIT_SUCCESS
      }

      if test -n $pic_flag || test $pic_mode != default {
	# Only do commands if we really have different PIC objects.
	setvar reload_objs = ""$libobjs $reload_conv_objs""
	setvar output = "$libobj"
	func_execute_cmds $reload_cmds 'exit $?'
      }

      if test -n $gentop {
	func_show_eval '${RM}r "$gentop"'
      }

      exit $EXIT_SUCCESS
      }

    prog {
      case (host) {
	*cygwin* { func_stripname '' '.exe' $output
	          setvar output = "$func_stripname_result.exe"}
      }
      test -n $vinfo && \
	func_warning "\`-version-info' is ignored for programs"

      test -n $release && \
	func_warning "\`-release' is ignored for programs"

      test $preload = yes \
        && test $dlopen_support = unknown \
	&& test $dlopen_self = unknown \
	&& test $dlopen_self_static = unknown && \
	  func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support."

      case (host) {
      *-*-rhapsody* | *-*-darwin1.[012] {
	# On Rhapsody replace the C library is the System framework
	setvar compile_deplibs = $($ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /)
	setvar finalize_deplibs = $($ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /)
	}
      }

      case (host) {
      *-*-darwin* {
	# Don't allow lazy linking, it breaks C++ global constructors
	# But is supposedly fixed on 10.4 or later (yay!).
	if test $tagname = CXX  {
	  case{
	    10.[0123] {
	      func_append compile_command " ${wl}-bind_at_load"
	      func_append finalize_command " ${wl}-bind_at_load"
	    }
	  }
	}
	# Time to change all our "foo.ltframework" stuff back to "-framework foo"
	setvar compile_deplibs = $($ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g)
	setvar finalize_deplibs = $($ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g)
	}
      }


      # move library search paths that coincide with paths to not yet
      # installed libraries to the beginning of the library search list
      setvar new_libs = ''
      for path in $notinst_path {
	case{
	*" -L$path/$objdir "* { }
	* {
	  case{
	  *" -L$path/$objdir "* {
	    func_append new_libs " -L$path/$objdir" }
	  }
	  }
	}
      }
      for deplib in $compile_deplibs {
	case (deplib) {
	-L* {
	  case{
	  *" $deplib "* { }
	  * { func_append new_libs " $deplib" }
	  }
	  }
	* { func_append new_libs " $deplib" }
	}
      }
      setvar compile_deplibs = "$new_libs"


      func_append compile_command " $compile_deplibs"
      func_append finalize_command " $finalize_deplibs"

      if test -n "$rpath$xrpath" {
	# If the user specified any rpath flags, then add them.
	for libdir in $rpath $xrpath {
	  # This is the magic to use -rpath.
	  case{
	  *" $libdir "* { }
	  * { func_append finalize_rpath " $libdir" }
	  }
	}
      }

      # Now hardcode the library paths
      setvar rpath = ''
      setvar hardcode_libdirs = ''
      for libdir in $compile_rpath $finalize_rpath {
	if test -n $hardcode_libdir_flag_spec {
	  if test -n $hardcode_libdir_separator {
	    if test -z $hardcode_libdirs {
	      setvar hardcode_libdirs = "$libdir"
	    } else {
	      # Just accumulate the unique libdirs.
	      case (hardcode_libdir_separator) {
	      *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"* {
		}
	      * {
		func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
		}
	      }
	    }
	  } else {
	    eval flag='"'$hardcode_libdir_flag_spec'"'
	    func_append rpath " $flag"
	  }
	} elif test -n $runpath_var {
	  case{
	  *" $libdir "* { }
	  * { func_append perm_rpath " $libdir" }
	  }
	}
	case (host) {
	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc* {
	  setvar testbindir = $(${ECHO} $libdir | ${SED} -e 's*/lib$*/bin*)
	  case{
	  *":$libdir:"* { }
	  :: { setvar dllsearchpath = "$libdir"}
	  * { func_append dllsearchpath ":$libdir"}
	  }
	  case{
	  *":$testbindir:"* { }
	  :: { setvar dllsearchpath = "$testbindir"}
	  * { func_append dllsearchpath ":$testbindir"}
	  }
	  }
	}
      }
      # Substitute the hardcoded libdirs into the rpath.
      if test -n $hardcode_libdir_separator &&
	 test -n $hardcode_libdirs {
	setvar libdir = "$hardcode_libdirs"
	eval rpath='"' $hardcode_libdir_flag_spec'"'
      }
      setvar compile_rpath = "$rpath"

      setvar rpath = ''
      setvar hardcode_libdirs = ''
      for libdir in $finalize_rpath {
	if test -n $hardcode_libdir_flag_spec {
	  if test -n $hardcode_libdir_separator {
	    if test -z $hardcode_libdirs {
	      setvar hardcode_libdirs = "$libdir"
	    } else {
	      # Just accumulate the unique libdirs.
	      case (hardcode_libdir_separator) {
	      *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"* {
		}
	      * {
		func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
		}
	      }
	    }
	  } else {
	    eval flag='"'$hardcode_libdir_flag_spec'"'
	    func_append rpath " $flag"
	  }
	} elif test -n $runpath_var {
	  case{
	  *" $libdir "* { }
	  * { func_append finalize_perm_rpath " $libdir" }
	  }
	}
      }
      # Substitute the hardcoded libdirs into the rpath.
      if test -n $hardcode_libdir_separator &&
	 test -n $hardcode_libdirs {
	setvar libdir = "$hardcode_libdirs"
	eval rpath='"' $hardcode_libdir_flag_spec'"'
      }
      setvar finalize_rpath = "$rpath"

      if test -n $libobjs && test $build_old_libs = yes {
	# Transform all the library objects into standard objects.
	setvar compile_command = $($ECHO $compile_command | $SP2NL | $SED $lo2o | $NL2SP)
	setvar finalize_command = $($ECHO $finalize_command | $SP2NL | $SED $lo2o | $NL2SP)
      }

      func_generate_dlsyms $outputname "@PROGRAM@" "no"

      # template prelinking step
      if test -n $prelink_cmds {
	func_execute_cmds $prelink_cmds 'exit $?'
      }

      setvar wrappers_required = 'yes'
      case (host) {
      *cegcc* | *mingw32ce* {
        # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway.
        setvar wrappers_required = 'no'
        }
      *cygwin* | *mingw*  {
        if test $build_libtool_libs != yes {
          setvar wrappers_required = 'no'
        }
        }
      * {
        if test $need_relink = no || test $build_libtool_libs != yes {
          setvar wrappers_required = 'no'
        }
        }
      }
      if test $wrappers_required = no {
	# Replace the output file specification.
	setvar compile_command = $($ECHO $compile_command | $SED 's%@OUTPUT@%'"$output"'%g)
	setvar link_command = ""$compile_command$compile_rpath""

	# We have no uninstalled library dependencies, so finalize right now.
	setvar exit_status = '0'
	func_show_eval $link_command 'exit_status=$?'

	if test -n $postlink_cmds {
	  func_to_tool_file $output
	  setvar postlink_cmds = $(func_echo_all $postlink_cmds | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g)
	  func_execute_cmds $postlink_cmds 'exit $?'
	}

	# Delete the generated files.
	if test -f "$output_objdir/${outputname}S.${objext}" {
	  func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"'
	}

	exit $exit_status
      }

      if test -n "$compile_shlibpath$finalize_shlibpath" {
	setvar compile_command = ""$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command""
      }
      if test -n $finalize_shlibpath {
	setvar finalize_command = ""$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command""
      }

      setvar compile_var = ''
      setvar finalize_var = ''
      if test -n $runpath_var {
	if test -n $perm_rpath {
	  # We should set the runpath_var.
	  setvar rpath = ''
	  for dir in $perm_rpath {
	    func_append rpath "$dir:"
	  }
	  setvar compile_var = ""$runpath_var=\"$rpath\$$runpath_var\" ""
	}
	if test -n $finalize_perm_rpath {
	  # We should set the runpath_var.
	  setvar rpath = ''
	  for dir in $finalize_perm_rpath {
	    func_append rpath "$dir:"
	  }
	  setvar finalize_var = ""$runpath_var=\"$rpath\$$runpath_var\" ""
	}
      }

      if test $no_install = yes {
	# We don't need to create a wrapper script.
	setvar link_command = ""$compile_var$compile_command$compile_rpath""
	# Replace the output file specification.
	setvar link_command = $($ECHO $link_command | $SED 's%@OUTPUT@%'"$output"'%g)
	# Delete the old output file.
	$opt_dry_run || $RM $output
	# Link the executable and exit
	func_show_eval $link_command 'exit $?'

	if test -n $postlink_cmds {
	  func_to_tool_file $output
	  setvar postlink_cmds = $(func_echo_all $postlink_cmds | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g)
	  func_execute_cmds $postlink_cmds 'exit $?'
	}

	exit $EXIT_SUCCESS
      }

      if test $hardcode_action = relink {
	# Fast installation is not supported
	setvar link_command = ""$compile_var$compile_command$compile_rpath""
	setvar relink_command = ""$finalize_var$finalize_command$finalize_rpath""

	func_warning "this platform does not like uninstalled shared libraries"
	func_warning "\`$output' will be relinked during installation"
      } else {
	if test $fast_install != no {
	  setvar link_command = ""$finalize_var$compile_command$finalize_rpath""
	  if test $fast_install = yes {
	    setvar relink_command = $($ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g)
	  } else {
	    # fast_install is set to needless
	    setvar relink_command = ''
	  }
	} else {
	  setvar link_command = ""$compile_var$compile_command$compile_rpath""
	  setvar relink_command = ""$finalize_var$finalize_command$finalize_rpath""
	}
      }

      # Replace the output file specification.
      setvar link_command = $($ECHO $link_command | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g)

      # Delete the old output files.
      $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname

      func_show_eval $link_command 'exit $?'

      if test -n $postlink_cmds {
	func_to_tool_file "$output_objdir/$outputname"
	setvar postlink_cmds = $(func_echo_all $postlink_cmds | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g)
	func_execute_cmds $postlink_cmds 'exit $?'
      }

      # Now create the wrapper script.
      func_verbose "creating $output"

      # Quote the relink command for shipping.
      if test -n $relink_command {
	# Preserve any variables that may affect compiler behavior
	for var in $variables_saved_for_relink {
	  if eval test -z '"''$'{$var+set}'"' {
	    setvar relink_command = ""{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command""
	  } elif eval var_value='$'$var; test -z $var_value; {
	    setvar relink_command = ""$var=; export $var; $relink_command""
	  } else {
	    func_quote_for_eval $var_value
	    setvar relink_command = ""$var=$func_quote_for_eval_result; export $var; $relink_command""
	  }
	}
	setvar relink_command = ""(cd $(pwd); $relink_command)""
	setvar relink_command = $($ECHO $relink_command | $SED $sed_quote_subst)
      }

      # Only actually do things if not in dry run mode.
      $opt_dry_run || do {
	# win32 will think the script is a binary if it has
	# a .exe suffix, so we strip it off here.
	case (output) {
	  *.exe { func_stripname '' '.exe' $output
	         setvar output = "$func_stripname_result" }
	}
	# test for cygwin because mv fails w/o .exe extensions
	case (host) {
	  *cygwin* {
	    setvar exeext = '.exe'
	    func_stripname '' '.exe' $outputname
	    setvar outputname = "$func_stripname_result" }
	  * { setvar exeext = '' }
	}
	case (host) {
	  *cygwin* | *mingw*  {
	    func_dirname_and_basename $output "" "."
	    setvar output_name = "$func_basename_result"
	    setvar output_path = "$func_dirname_result"
	    setvar cwrappersource = ""$output_path/$objdir/lt-$output_name.c""
	    setvar cwrapper = ""$output_path/$output_name.exe""
	    $RM $cwrappersource $cwrapper
	    trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15

	    func_emit_cwrapperexe_src > $cwrappersource

	    # The wrapper executable is built using the $host compiler,
	    # because it contains $host paths and files. If cross-
	    # compiling, it, like the target executable, must be
	    # executed on the $host or under an emulation environment.
	    $opt_dry_run || do {
	      $LTCC $LTCFLAGS -o $cwrapper $cwrappersource
	      $STRIP $cwrapper
	    }

	    # Now, create the wrapper script for func_source use:
	    func_ltwrapper_scriptname $cwrapper
	    $RM $func_ltwrapper_scriptname_result
	    trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15
	    $opt_dry_run || do {
	      # note: this script will not be executed, so do not chmod.
	      if test "x$build" = "x$host"  {
		$cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result
	      } else {
		func_emit_wrapper no > $func_ltwrapper_scriptname_result
	      }
	    }
	  }
	  *  {
	    $RM $output
	    trap "$RM $output; exit $EXIT_FAILURE" 1 2 15

	    func_emit_wrapper no > $output
	    chmod +x $output
	  }
	}
      }
      exit $EXIT_SUCCESS
      }
    }

    # See if we need to build an old-fashioned archive.
    for oldlib in $oldlibs {

      if test $build_libtool_libs = convenience {
	setvar oldobjs = ""$libobjs_save $symfileobj""
	setvar addlibs = "$convenience"
	setvar build_libtool_libs = 'no'
      } else {
	if test $build_libtool_libs = module {
	  setvar oldobjs = "$libobjs_save"
	  setvar build_libtool_libs = 'no'
	} else {
	  setvar oldobjs = ""$old_deplibs $non_pic_objects""
	  if test $preload = yes && test -f $symfileobj {
	    func_append oldobjs " $symfileobj"
	  }
	}
	setvar addlibs = "$old_convenience"
      }

      if test -n $addlibs {
	setvar gentop = ""$output_objdir/${outputname}x""
	func_append generated " $gentop"

	func_extract_archives $gentop $addlibs
	func_append oldobjs " $func_extract_archives_result"
      }

      # Do each command in the archive commands.
      if test -n $old_archive_from_new_cmds && test $build_libtool_libs = yes {
	setvar cmds = "$old_archive_from_new_cmds"
      } else {

	# Add any objects from preloaded convenience libraries
	if test -n $dlprefiles {
	  setvar gentop = ""$output_objdir/${outputname}x""
	  func_append generated " $gentop"

	  func_extract_archives $gentop $dlprefiles
	  func_append oldobjs " $func_extract_archives_result"
	}

	# POSIX demands no paths to be encoded in archives.  We have
	# to avoid creating archives with duplicate basenames if we
	# might have to extract them afterwards, e.g., when creating a
	# static archive out of a convenience library, or when linking
	# the entirety of a libtool archive into another (currently
	# not supported by libtool).
	if shell {for obj in $oldobjs
	    {
	      func_basename $obj
	      $ECHO $func_basename_result
	    } | sort | sort -uc >/dev/null 2>&1} {
	  :
	} else {
	  echo "copying selected object files to avoid basename conflicts..."
	  setvar gentop = ""$output_objdir/${outputname}x""
	  func_append generated " $gentop"
	  func_mkdir_p $gentop
	  setvar save_oldobjs = "$oldobjs"
	  setvar oldobjs = ''
	  setvar counter = '1'
	  for obj in $save_oldobjs
	  {
	    func_basename $obj
	    setvar objbase = "$func_basename_result"
	    case{
	    " " { setvar oldobjs = "$obj" }
	    *[\ /]"$objbase "* {
	      while : {
		# Make sure we don't pick an alternate name that also
		# overlaps.
		setvar newobj = "lt$counter-$objbase"
		func_arith $counter + 1
		setvar counter = "$func_arith_result"
		case{
		*[\ /]"$newobj "* { }
		* { if test ! -f "$gentop/$newobj" { break; } }
		}
	      }
	      func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj"
	      func_append oldobjs " $gentop/$newobj"
	      }
	    * { func_append oldobjs " $obj" }
	    }
	  }
	}
	func_to_tool_file $oldlib func_convert_file_msys_to_w32
	setvar tool_oldlib = "$func_to_tool_file_result"
	eval cmds='"'$old_archive_cmds'"'

	func_len " $cmds"
	setvar len = "$func_len_result"
	if test $len -lt $max_cmd_len || test $max_cmd_len -le -1 {
	  setvar cmds = "$old_archive_cmds"
	} elif test -n $archiver_list_spec {
	  func_verbose "using command file archive linking..."
	  for obj in $oldobjs
	  {
	    func_to_tool_file $obj
	    $ECHO $func_to_tool_file_result
	  } > $output_objdir/$libname.libcmd
	  func_to_tool_file "$output_objdir/$libname.libcmd"
	  setvar oldobjs = "" $archiver_list_spec$func_to_tool_file_result""
	  setvar cmds = "$old_archive_cmds"
	} else {
	  # the command line is too long to link in one step, link in parts
	  func_verbose "using piecewise archive linking..."
	  setvar save_RANLIB = "$RANLIB"
	  setvar RANLIB = ':'
	  setvar objlist = ''
	  setvar concat_cmds = ''
	  setvar save_oldobjs = "$oldobjs"
	  setvar oldobjs = ''
	  # Is there a better way of finding the last object in the list?
	  for obj in $save_oldobjs
	  {
	    setvar last_oldobj = "$obj"
	  }
	  eval test_cmds='"'$old_archive_cmds'"'
	  func_len " $test_cmds"
	  setvar len0 = "$func_len_result"
	  setvar len = "$len0"
	  for obj in $save_oldobjs
	  {
	    func_len " $obj"
	    func_arith $len + $func_len_result
	    setvar len = "$func_arith_result"
	    func_append objlist " $obj"
	    if test $len -lt $max_cmd_len {
	      :
	    } else {
	      # the above command should be used before it gets too long
	      setvar oldobjs = "$objlist"
	      if test $obj = $last_oldobj  {
		setvar RANLIB = "$save_RANLIB"
	      }
	      test -z $concat_cmds || setvar concat_cmds = "$concat_cmds~"
	      eval concat_cmds='"''$'{concat_cmds}$old_archive_cmds'"'
	      setvar objlist = ''
	      setvar len = "$len0"
	    }
	  }
	  setvar RANLIB = "$save_RANLIB"
	  setvar oldobjs = "$objlist"
	  if test "X$oldobjs" = "X"  {
	    eval cmds='"''$'concat_cmds'"'
	  } else {
	    eval cmds='"''$'concat_cmds~'$'old_archive_cmds'"'
	  }
	}
      }
      func_execute_cmds $cmds 'exit $?'
    }

    test -n $generated && \
      func_show_eval "${RM}r$generated"

    # Now create the libtool archive.
    case (output) {
    *.la {
      setvar old_library = ''
      test $build_old_libs = yes && setvar old_library = ""$libname.$libext""
      func_verbose "creating $output"

      # Preserve any variables that may affect compiler behavior
      for var in $variables_saved_for_relink {
	if eval test -z '"''$'{$var+set}'"' {
	  setvar relink_command = ""{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command""
	} elif eval var_value='$'$var; test -z $var_value; {
	  setvar relink_command = ""$var=; export $var; $relink_command""
	} else {
	  func_quote_for_eval $var_value
	  setvar relink_command = ""$var=$func_quote_for_eval_result; export $var; $relink_command""
	}
      }
      # Quote the link command for shipping.
      setvar relink_command = ""(cd $(pwd); $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)""
      setvar relink_command = $($ECHO $relink_command | $SED $sed_quote_subst)
      if test $hardcode_automatic = yes  {
	setvar relink_command = ''
      }

      # Only create the output if not a dry run.
      $opt_dry_run || do {
	for installed in no yes {
	  if test $installed = yes {
	    if test -z $install_libdir {
	      break
	    }
	    setvar output = ""$output_objdir/$outputname"i"
	    # Replace all uninstalled libtool libraries with the installed ones
	    setvar newdependency_libs = ''
	    for deplib in $dependency_libs {
	      case (deplib) {
	      *.la {
		func_basename $deplib
		setvar name = "$func_basename_result"
		func_resolve_sysroot $deplib
		eval libdir=$(${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result)
		test -z $libdir && \
		  func_fatal_error "\`$deplib' is not a valid libtool archive"
		func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name"
		}
	      -L* {
		func_stripname -L '' $deplib
		func_replace_sysroot $func_stripname_result
		func_append newdependency_libs " -L$func_replace_sysroot_result"
		}
	      -R* {
		func_stripname -R '' $deplib
		func_replace_sysroot $func_stripname_result
		func_append newdependency_libs " -R$func_replace_sysroot_result"
		}
	      * { func_append newdependency_libs " $deplib" }
	      }
	    }
	    setvar dependency_libs = "$newdependency_libs"
	    setvar newdlfiles = ''

	    for lib in $dlfiles {
	      case (lib) {
	      *.la {
	        func_basename $lib
		setvar name = "$func_basename_result"
		eval libdir=$(${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib)
		test -z $libdir && \
		  func_fatal_error "\`$lib' is not a valid libtool archive"
		func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name"
		}
	      * { func_append newdlfiles " $lib" }
	      }
	    }
	    setvar dlfiles = "$newdlfiles"
	    setvar newdlprefiles = ''
	    for lib in $dlprefiles {
	      case (lib) {
	      *.la {
		# Only pass preopened files to the pseudo-archive (for
		# eventual linking with the app. that links it) if we
		# didn't already link the preopened objects directly into
		# the library:
		func_basename $lib
		setvar name = "$func_basename_result"
		eval libdir=$(${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib)
		test -z $libdir && \
		  func_fatal_error "\`$lib' is not a valid libtool archive"
		func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name"
		}
	      }
	    }
	    setvar dlprefiles = "$newdlprefiles"
	  } else {
	    setvar newdlfiles = ''
	    for lib in $dlfiles {
	      case (lib) {
		[\\/]* | [A-Za-z]:[\\/]* { setvar abs = "$lib" }
		* { setvar abs = "$(pwd)"/$lib"" }
	      }
	      func_append newdlfiles " $abs"
	    }
	    setvar dlfiles = "$newdlfiles"
	    setvar newdlprefiles = ''
	    for lib in $dlprefiles {
	      case (lib) {
		[\\/]* | [A-Za-z]:[\\/]* { setvar abs = "$lib" }
		* { setvar abs = "$(pwd)"/$lib"" }
	      }
	      func_append newdlprefiles " $abs"
	    }
	    setvar dlprefiles = "$newdlprefiles"
	  }
	  $RM $output
	  # place dlname in correct position for cygwin
	  # In fact, it would be nice if we could use this code for all target
	  # systems that can't hard-code library paths into their executables
	  # and that have no shared library path variable independent of PATH,
	  # but it turns out we can't easily determine that from inspecting
	  # libtool variables, so we have to hard-code the OSs to which it
	  # applies here; at the moment, that means platforms that use the PE
	  # object format with DLL files.  See the long comment at the top of
	  # tests/bindir.at for full details.
	  setvar tdlname = "$dlname"
	  case (host) {
	    *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll {
	      # If a -bindir argument was supplied, place the dll there.
	      if test "x$bindir" != x 
	      {
		func_relative_path $install_libdir $bindir
		setvar tdlname = "$func_relative_path_result$dlname"
	      } else {
		# Otherwise fall back on heuristic.
		setvar tdlname = "../bin/$dlname"
	      }
	      }
	  }
	  $ECHO > $output "\
# $outputname - a libtool library file
# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
#
# Please DO NOT delete this file!
# It is necessary for linking the library.

# The name that we can dlopen(3).
dlname='$tdlname'

# Names of this library.
library_names='$library_names'

# The name of the static archive.
old_library='$old_library'

# Linker flags that can not go in dependency_libs.
inherited_linker_flags='$new_inherited_linker_flags'

# Libraries that this one depends upon.
dependency_libs='$dependency_libs'

# Names of additional weak libraries provided by this library
weak_library_names='$weak_libs'

# Version information for $libname.
current=$current
age=$age
revision=$revision

# Is this an already installed library?
installed=$installed

# Should we warn about portability when linking against -modules?
shouldnotlink=$module

# Files to dlopen/dlpreopen
dlopen='$dlfiles'
dlpreopen='$dlprefiles'

# Directory that this library needs to be installed in:
libdir='$install_libdir'> $output "\
# $outputname - a libtool library file
# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
#
# Please DO NOT delete this file!
# It is necessary for linking the library.

# The name that we can dlopen(3).
dlname='$tdlname'

# Names of this library.
library_names='$library_names'

# The name of the static archive.
old_library='$old_library'

# Linker flags that can not go in dependency_libs.
inherited_linker_flags='$new_inherited_linker_flags'

# Libraries that this one depends upon.
dependency_libs='$dependency_libs'

# Names of additional weak libraries provided by this library
weak_library_names='$weak_libs'

# Version information for $libname.
current=$current
age=$age
revision=$revision

# Is this an already installed library?
installed=$installed

# Should we warn about portability when linking against -modules?
shouldnotlink=$module

# Files to dlopen/dlpreopen
dlopen='$dlfiles'
dlpreopen='$dlprefiles'

# Directory that this library needs to be installed in:
libdir='$install_libdir'"
	  if test $installed = no && test $need_relink = yes {
	    $ECHO >> $output "\
relink_command=\"$relink_command>> $output "\
relink_command=\"$relink_command\""
	  }
	}
      }

      # Do a symbolic link so that the libtool archive can be found in
      # LD_LIBRARY_PATH before the program is installed.
      func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?'
      }
    }
    exit $EXIT_SUCCESS
}

# Dispatch to the link handler for 'link' and 'relink' modes.
# ${1+"$@"} forwards the script arguments (empty-safe idiom).
do { test $opt_mode = link || test $opt_mode = relink; } &&
    func_mode_link ${1+"$@"}


# func_mode_uninstall arg...
proc func_mode_uninstall {
    $opt_debug
    setvar RM = "$nonopt"
    setvar files = ''
    setvar rmforce = ''
    setvar exit_status = '0'

    # This variable tells wrapper scripts just to set variables rather
    # than running their programs.
    setvar libtool_install_magic = "$magic"for arg in @ARGV {
      case (arg) {
      -f { func_append RM " $arg"; setvar rmforce = 'yes' }
      -* { func_append RM " $arg" }
      * { func_append files " $arg" }
      }
    }

    test -z $RM && \
      func_fatal_help "you must specify an RM program"

    setvar rmdirs = ''

    for file in $files {
      func_dirname $file "" "."
      setvar dir = "$func_dirname_result"
      if test "X$dir" = X. {
	setvar odir = "$objdir"
      } else {
	setvar odir = ""$dir/$objdir""
      }
      func_basename $file
      setvar name = "$func_basename_result"
      test $opt_mode = uninstall && setvar odir = "$dir"

      # Remember odir for removal later, being careful to avoid duplicates
      if test $opt_mode = clean {
	case{
	  *" $odir "* { }
	  * { func_append rmdirs " $odir" }
	}
      }

      # Don't error if the file doesn't exist and rm -f was used.
      if do { test -L $file; } >/dev/null 2>&1 ||
	 do { test -h $file; } >/dev/null 2>&1 ||
	 test -f $file {
	:
      } elif test -d $file {
	setvar exit_status = '1'
	continue
      } elif test $rmforce = yes {
	continue
      }

      setvar rmfiles = "$file"

      case (name) {
      *.la {
	# Possibly a libtool archive, so verify it.
	if func_lalib_p $file {
	  func_source $dir/$name

	  # Delete the libtool libraries and symlinks.
	  for n in $library_names {
	    func_append rmfiles " $odir/$n"
	  }
	  test -n $old_library && func_append rmfiles " $odir/$old_library"

	  case (opt_mode) {
	  clean {
	    case{
	    *" $dlname "* { }
	    * { test -n $dlname && func_append rmfiles " $odir/$dlname" }
	    }
	    test -n $libdir && func_append rmfiles " $odir/$name $odir/${name}i"
	    }
	  uninstall {
	    if test -n $library_names {
	      # Do each command in the postuninstall commands.
	      func_execute_cmds $postuninstall_cmds 'test "$rmforce" = yes || exit_status=1'
	    }

	    if test -n $old_library {
	      # Do each command in the old_postuninstall commands.
	      func_execute_cmds $old_postuninstall_cmds 'test "$rmforce" = yes || exit_status=1'
	    }
	    # FIXME: should reinstall the best remaining shared library.
	    }
	  }
	}
	}

      *.lo {
	# Possibly a libtool object, so verify it.
	if func_lalib_p $file {

	  # Read the .lo file
	  func_source $dir/$name

	  # Add PIC object to the list of files to remove.
	  if test -n $pic_object &&
	     test $pic_object != none {
	    func_append rmfiles " $dir/$pic_object"
	  }

	  # Add non-PIC object to the list of files to remove.
	  if test -n $non_pic_object &&
	     test $non_pic_object != none {
	    func_append rmfiles " $dir/$non_pic_object"
	  }
	}
	}

      * {
	if test $opt_mode = clean  {
	  setvar noexename = "$name"
	  case (file) {
	  *.exe {
	    func_stripname '' '.exe' $file
	    setvar file = "$func_stripname_result"
	    func_stripname '' '.exe' $name
	    setvar noexename = "$func_stripname_result"
	    # $file with .exe has already been added to rmfiles,
	    # add $file without .exe
	    func_append rmfiles " $file"
	    }
	  }
	  # Do a test to see if this is a libtool program.
	  if func_ltwrapper_p $file {
	    if func_ltwrapper_executable_p $file {
	      func_ltwrapper_scriptname $file
	      setvar relink_command = ''
	      func_source $func_ltwrapper_scriptname_result
	      func_append rmfiles " $func_ltwrapper_scriptname_result"
	    } else {
	      setvar relink_command = ''
	      func_source $dir/$noexename
	    }

	    # note $name still contains .exe if it was in $file originally
	    # as does the version of $file that was added into $rmfiles
	    func_append rmfiles " $odir/$name $odir/${name}S.${objext}"
	    if test $fast_install = yes && test -n $relink_command {
	      func_append rmfiles " $odir/lt-$name"
	    }
	    if test "X$noexename" != "X$name"  {
	      func_append rmfiles " $odir/lt-${noexename}.c"
	    }
	  }
	}
	}
      }
      func_show_eval "$RM $rmfiles" 'exit_status=1'
    }

    # Try to remove the ${objdir}s in the directories where we deleted files
    for dir in $rmdirs {
      if test -d $dir {
	func_show_eval "rmdir $dir >/dev/null 2>&1"
      }
    }

    exit $exit_status
}

# Dispatch to the uninstall handler for 'uninstall' and 'clean' modes.
do { test $opt_mode = uninstall || test $opt_mode = clean; } &&
    func_mode_uninstall ${1+"$@"}

# A mode is mandatory; fail with the generic usage help otherwise.
test -z $opt_mode && do {
  setvar help = "$generic_help"
  func_fatal_help "you must specify a MODE"
}

# If no handler ran and no exec command was prepared, the mode is unknown.
test -z $exec_cmd && \
  func_fatal_help "invalid operation mode \`$opt_mode'"

if test -n $exec_cmd {
  # exec replaces this process; reaching the exit below means exec failed.
  eval exec $exec_cmd
  exit $EXIT_FAILURE
}

exit $exit_status


# The TAGs below are defined such that we never get into a situation
# in which we disable both kinds of libraries.  Given conflicting
# choices, we go for a static library, that is the most portable,
# since we can't tell whether shared libraries were disabled because
# the user asked for that or because the platform doesn't support
# them.  This is particularly important on AIX, because we don't
# support having both static and shared libraries enabled at the same
# time on that platform, so we default to a shared-only configuration.
# If a disable-shared tag is given, we'll fall back to a static-only
# configuration.  But we'll never go from static-only to shared-only.

# ### BEGIN LIBTOOL TAG CONFIG: disable-shared
# This tag forces a static-only build: shared libtool libs are never built.
setvar build_libtool_libs = 'no'
setvar build_old_libs = 'yes'
# ### END LIBTOOL TAG CONFIG: disable-shared

# ### BEGIN LIBTOOL TAG CONFIG: disable-static
# Build static libs only when shared libs are disabled (never disable both).
setvar build_old_libs = $(case (build_libtool_libs) { yes { echo no} * { echo yes} })
# ### END LIBTOOL TAG CONFIG: disable-static

# Local Variables:
# mode:shell-script
# sh-indentation:2
# End:
# vi:sw=2

    (DONE benchmarks/testdata/ltmain.sh)
#!/usr/bin/env bash
#
# Common functions for benchmarks.
#
# NOTE(review): the shebang says bash, but the body uses Oil/YSH syntax
# (setvar, @ARGV, proc) — confirm the intended interpreter.

# Include guard.
test -n ${__BENCHMARKS_COMMON_SH:-} && return
readonly __BENCHMARKS_COMMON_SH=1

#readonly MACHINE1=flanders
#readonly MACHINE2=lenny

# 2023-11-29: machine1 is still lenny because it has bloaty, which doesn't
#             work with ELF data emitted by newer GCC on Debian 12
readonly MACHINE1=lenny
readonly MACHINE2=hoover

# Version string is the first line of oil-version.txt at the repo root.
setvar OIL_VERSION = $(head -n 1 oil-version.txt)

# Used by devtools/release.sh
readonly BENCHMARK_DATA_OILS=$PWD/../benchmark-data/src/oils-for-unix-$OIL_VERSION

# Paths to the C++ translation of the shell, under the two build systems.
readonly OSH_CPP_NINJA_BUILD=_bin/cxx-opt/osh

readonly OSH_CPP_SH_BUILD=_bin/cxx-opt-sh/osh
readonly YSH_CPP_SH_BUILD=_bin/cxx-opt-sh/ysh

readonly OSH_CPP_BENCHMARK_DATA=$BENCHMARK_DATA_OILS/$OSH_CPP_SH_BUILD
readonly YSH_CPP_BENCHMARK_DATA=$BENCHMARK_DATA_OILS/$YSH_CPP_SH_BUILD

#
# Binaries we want to test, which can be overridden
#

setvar OSH_OVM = ${OSH_OVM:-_bin/osh}  # This is overridden by devtools/release.sh.

readonly OTHER_SHELLS=( bash dash mksh zsh )
readonly SHELLS=( ${OTHER_SHELLS[@]} bin/osh $OSH_OVM )

# Passed to awk in filter-provenance.  TODO: This could be a parameter
# Awk wants this to be \\. ?  Probably should stop using Awk.
readonly OSH_CPP_REGEX='_bin/.*/osh'

# NOTE: This is in {build,test}/common.sh too.
proc die {
  ### Print all args as a FATAL message on stderr, then abort the script
  echo "FATAL: $[join(ARGV)]" >&2
  exit 1
}

proc log {
  ### Echo args to stderr, keeping stdout clean for command substitution
  echo @ARGV >&2
}

proc cmark {
  ### A filter for making reports: Markdown on stdin, HTML on stdout
  # The quoting here was previously mangled (the env prefix was split across
  # a quoted brace); this is the intended form: run doctools/cmark.py with
  # the repo root on PYTHONPATH so its imports resolve.
  PYTHONPATH=. doctools/cmark.py @ARGV
}

# For compatibility, if cell starts with 'osh', apply the 'special' CSS class.
proc csv2html {
  ### Turn a CSV on stdin into an HTML table
  # Cells matching ^osh get the 'special' CSS class (see comment above).
  web/table/csv2html.py --css-class-pattern 'special ^osh' @ARGV
}

# also in metrics/source-code.sh
# Frequency count of input lines, printed with the least frequent first.
proc hist { sort | uniq -c | sort -n; }

proc html-head {
  ### Print an HTML <head> section; all args are forwarded to html_head.py
  # The quoting here was previously mangled (the env prefix was split across
  # a quoted brace); this is the intended form: run doctools/html_head.py
  # with the repo root on PYTHONPATH so its imports resolve.
  PYTHONPATH=. doctools/html_head.py @ARGV
}

proc benchmark-html-head {
  ### <head> boilerplate shared by benchmark report pages
  local title="$1"

  # Reports live two levels below the web/ assets.
  local web='../../web'

  html-head --title $title \
    "$web/table/table-sort.js" \
    "$web/table/table-sort.css" \
    "$web/base.css" \
    "$web/benchmarks.css"
}

proc filter-provenance {
  ### Keep only provenance rows (stdin) whose 4th column matches a shell in "$@"
  # create a regex bash|dash
  local pat=$(echo "$@" | sed 's/ /|/g')

  # Anchor it at the end only.  For _bin/cxx-opt/oils-for-unix.stripped and the
  # ../benchmark-data one.
  # NOTE(review): the quoting below is auto-translator output; confirm it
  # yields the value ($pat)\$ as the original bash assignment did.
  setvar pat = ""($pat)"\$"

  # 4th column is the shell
  awk -v pat="$pat" '$4 ~ pat { print }'
}

proc maybe-tree {
  ### Run tree command if it's installed
  # NOTE(review): command -v prints the resolved path to stdout when found;
  # add >/dev/null if that extra line of output is unwanted.
  if command -v tree {
    tree @ARGV
  }
}
    (DONE benchmarks/common.sh)
#!/usr/bin/env bash
#
# Usage:
#   benchmarks/uftrace.sh <function name>
#
# Examples:
#   benchmarks/uftrace.sh record-oils-cpp
#   benchmarks/uftrace.sh replay-alloc
#   benchmarks/uftrace.sh plugin-allocs
#
# TODO:
# - uftrace dump --chrome       # time-based trace
# - uftrace dump --flame-graph  # common stack traces, e.g. for allocation

set -o nounset
set -o pipefail
set -o errexit

source benchmarks/common.sh  # cmark function.  TODO: could use executable
source build/dev-shell.sh  # put uftrace in $PATH, R_LIBS_USER
source devtools/common.sh  # banner
source test/common.sh  # escape-html

# All trace data and reports are written under this directory.
readonly BASE_DIR=_tmp/uftrace

proc download {
  ### Fetch the uftrace 0.13 source tarball into _cache/ (skips if present)
  wget --directory _cache --no-clobber \
    https://github.com/namhyung/uftrace/archive/refs/tags/v0.13.tar.gz
    #https://github.com/namhyung/uftrace/archive/v0.9.3.tar.gz
}

proc extract {
  ### Unpack the downloaded tarball inside _cache/
  pushd _cache
  tar -x -z < v0.13.tar.gz
  popd
}

proc build {
  ### Configure and build uftrace from the extracted source tree
  cd _cache/uftrace-0.13
  ./configure
  make

  # It can't find some files unless we do this
  echo 'Run sudo make install'
}

proc ubuntu-hack {
  # Annoying: the plugin engine tries to look for the wrong file?
  # What's 3.6m.so vs 3.6.so ???
  # Creates a compat symlink so the name the plugin engine looks for exists.

  cd /usr/lib/x86_64-linux-gnu
  ln -s libpython3.6m.so.1.0 libpython3.6.so
}

# https://github.com/namhyung/uftrace/wiki/Tutorial
proc hello-demo {
  ### Smallest uftrace demo: compile hello-world with -pg and trace it
  cat >_tmp/hello.c <<< """
#include <stdio.h>

int main(void) {
  printf("Hello world\n");
  return 0;
}
"""

  # -pg inserts the profiling hooks uftrace needs
  gcc -o _tmp/hello -pg _tmp/hello.c

  uftrace _tmp/hello
}

proc record-oils-cpp {
  ### Record a trace, but limit to allocations functions, for size
  # $1 out_dir: where uftrace writes its data
  # $2 unfiltered: non-empty => looser filtering, larger data, dir renamed
  # remaining args: passed to the traced osh binary

  local out_dir=$1
  local unfiltered=${2:-}
  shift 2

  #local flags=(-F process::Process::RunWait -F process::Process::Process)

  local -a flags

  # NOTE(review): the ''( ... ) array spellings below are auto-translator
  # output for bash array assignments; verify they parse as intended.
  if test -n $unfiltered {
    setvar out_dir = "$out_dir.unfiltered"

    # Look for the pattern:
    # Alloc() {
    #   MarkSweepHeap::Allocate(24)
    #   syntax_asdl::line_span::line_span()
    # }
    setvar flags = ''(
      -F 'Alloc'
      -F 'MarkSweepHeap::Allocate' -A 'MarkSweepHeap::Allocate@arg2'
      -D 2
    )
    # If we don't filter at all, then it's huge
    # flags=()

  } else {
    # It's faster to filter just these function calls
    # Need .* for --demangle full

    setvar flags = ''(
      # low level allocation
      -F 'MarkSweepHeap::Allocate.*' -A 'MarkSweepHeap::Allocate.*@arg2'

      # typed allocation
      -F 'Alloc<.*'  # missing type info

      # Flexible array allocation
      # arg 1 is str_len
      -F 'NewStr.*' -A 'NewStr.*@arg1'
      -F 'OverAllocatedStr.*' -A 'OverAllocatedStr.*@arg1'

      # This constructor doesn't matter.  We care about the interface in
      # mycpp/gc_alloc.h
      # -F 'Str::Str.*'

      # arg1 is number of elements of type T
      -F 'NewSlab<.*' -A 'NewSlab<.*@arg1'
      # -F 'Slab<.*>::Slab.*'

      # Fixed size header allocation
      # arg2 is the number of items to reserve
      # -F 'List<.*>::List.*'
      -F 'List<.*>::reserve.*' -A 'List<.*>::reserve.*@arg2'
      # -F 'Dict<.*>::Dict.*'  # does not allocate
      -F 'Dict<.*>::reserve.*' -A 'Dict<.*>::reserve.*@arg2'

      # Common object
      # -F 'syntax_asdl::Token::Token'

      -D 1
    )

    # Problem: some of these aren't allocations
    # -F 'Tuple2::Tuple2'
    # -F 'Tuple3::Tuple3'
    # -F 'Tuple4::Tuple4'

    # StrFromC calls NewStr, so we don't need it
    # -F 'StrFromC' -A 'StrFromC@arg1' -A 'StrFromC@arg2'
  }

  # Build the instrumented binary, then trace a run of it.
  local bin=_bin/cxx-uftrace/osh
  ninja $bin

  mkdir -p $out_dir
  time uftrace record --demangle full -d $out_dir ${flags[@]} $bin @ARGV

  ls -d $out_dir/
  ls -l --si $out_dir/
}

proc run-tasks {
  ### Read task names on stdin; record one uftrace trace per task
  while read task {
    # NOTE(review): banner says 'utrace'; probably meant 'uftrace' (runtime
    # string, left unchanged here).
    banner "$task: utrace record"

    # TODO: Could share with benchmarks/gc
    case (task) {
      parse.configure-cpython {
        setvar data_file = ''Python-2.7.13/configure''
        }
      parse.abuild {
        setvar data_file = ''benchmarks/testdata/abuild''
        }
    }

    # Construct argv for each task
    local -a argv
    case (task) {
      parse.* {
        setvar argv = ''( --ast-format none -n $data_file  )
        }

      ex.compute-fib {
        setvar argv = ''( benchmarks/compute/fib.sh 10 44 )
        }

      ex.bashcomp-excerpt {
        # NOTE: benchmarks/gc.sh uses the larger clang.txt file
        setvar argv = ''( benchmarks/parse-help/pure-excerpt.sh parse_help_file 
               benchmarks/parse-help/mypy.txt )
        }

    }

    local out_dir=$BASE_DIR/raw/$task

    # '' = filtered recording (see record-oils-cpp's second parameter)
    record-oils-cpp $out_dir '' ${argv[@]}
  }
}

proc print-tasks {
  # Same as benchmarks/gc
  local -a tasks=(
    # This one is a bit big
    # parse.configure-cpython

    parse.abuild
    ex.bashcomp-excerpt
    ex.compute-fib
  )

  for task in "${tasks[@]}" {
    echo $task
  }
}

proc measure-all {
  ### Record a trace for every task listed by print-tasks
  print-tasks | run-tasks
}

proc frequent-calls {
  ### Histogram of the most frequently called functions in a trace
  local dir=$1
  uftrace report -s call --demangle full -d $dir
}

proc call-graph {
  ### Print the recorded call graph for a trace directory
  local dir=$1
  uftrace graph -d $dir
}

proc tsv-plugin {
  ### Export one task's raw trace to TSV via the uftrace Python script plugin
  local task=${1:-ex.compute-fib}

  local dir=$BASE_DIR/raw/$task

  # On the big configure-coreutils script, this takes 10 seconds.  That's
  # acceptable.  Gives 2,402,003 allocations.

  local out_dir=_tmp/uftrace/stage1/$task
  mkdir -p $out_dir
  time uftrace script --demangle full -d $dir -S benchmarks/uftrace_allocs.py $out_dir

  wc -l $out_dir/*.tsv
}

proc report-all {
  ### Print the frequent-calls report for every task
  print-tasks | while read task {
    banner "$task: report"

    frequent-calls $BASE_DIR/raw/$task

    echo
  }
}

proc export-all {
  ### Export every task's trace to TSV; requires uftrace built with python3
  if uftrace --version | grep python3 {
    echo 'uftrace has Python 3 plugin support'
  } else {
    die 'uftrace is MISSING Python 3 plugin support'
  }

  # TODO: Join into a single TSV file
  print-tasks | while read task {
    banner "$task: export to TSV with Python3 plugin"
    time tsv-plugin $task
  }
}

proc html-index {
  ### Emit the report index page as HTML on stdout
  echo '<body style="margin: 0 auto; width: 40em; font-size: large">'

  cmark <<< '''
# uftrace reports

Workloads:
'''

  # Link to text files
  # NOTE(review): the inner double quotes close and reopen the string, so the
  # href attribute is emitted unquoted in the HTML; works, but fragile.
  print-tasks | while read task {
    echo "<a href="stage2/$task.txt">$task</a> <br/>"
  }

  cmark <<< '## Summary'

  echo '<pre>'

  cat $BASE_DIR/stage2/summary.txt | escape-html

  echo '</pre>'
  echo '</body>'
}

proc analyze-all {
  ### Run the R report over stage1 TSV data, then build the HTML index
  local in_dir=$BASE_DIR/stage1/
  local out_dir=$BASE_DIR/stage2/

  # prepare dirs for R to write to
  print-tasks | while read task {
    mkdir -v -p $out_dir/$task
  }

  # Writes stage2/summary.txt
  benchmarks/report.R uftrace  $in_dir $out_dir

  html-index > $BASE_DIR/index.html
  echo "Wrote $BASE_DIR/index.html"
}


# Hm this shows EVERY call stack that produces a list!

# uftrace graph usage shown here
# https://github.com/namhyung/uftrace/wiki/Tutorial

proc replay-alloc {
  ### Replay a trace, showing MarkSweepHeap::Allocate at depth 1
  # NOTE(review): $1 is accepted but never used — replay reads the default
  # uftrace data dir.  Probably should pass -d $out_dir; confirm intent.
  local out_dir=$1

  # call graph
  #uftrace graph -C 'MarkSweepHeap::Allocate'

  # shows what calls this function
  #uftrace replay -C 'MarkSweepHeap::Allocate'

  # shows what this function calls
  #uftrace replay -F 'MarkSweepHeap::Allocate'

  # filters may happen at record or replay time

  # depth of 1
  #uftrace replay -D 1 -F 'MarkSweepHeap::Allocate'

  uftrace replay -D 1 -F 'MarkSweepHeap::Allocate'
}

proc plugin {
  ### Run the generic uftrace Python plugin over the default data dir
  # Note this one likes UNFILTERED data
  uftrace script -S benchmarks/uftrace_plugin.py
}

proc soil-run {
  ### CI entry point: record, export, and analyze all benchmark tasks
  measure-all
  export-all
  analyze-all
}

# "Task file" pattern: run the function named by the first CLI argument.
@ARGV
    (DONE benchmarks/uftrace.sh)
#!/usr/bin/env bash
#
# Keep track of benchmark data provenance.
#
# Usage:
#   benchmarks/id.sh <function name>

set -o nounset
set -o pipefail
set -o errexit

# Absolute path to the repo root, computed from this script's location.
setvar REPO_ROOT = $(cd $(dirname $0)/..; pwd)
readonly REPO_ROOT

source build/common.sh  # for $CLANG
source benchmarks/common.sh
source soil/common.sh  # find-dir-html
source test/tsv-lib.sh  # tsv-row

proc print-job-id {
  ### Timestamp used to name provenance files, e.g. 2023-11-29__12-00-00
  date '+%Y-%m-%d__%H-%M-%S'
}

# TODO: add benchmark labels/hashes for osh and all other shells
#
# Need to archive labels too.
#
# TODO: How do I make sure the zsh label is current?  Across different
# machines?
#
# What happens when zsh is silently upgraded?
# I guess before every benchmark, you have to run the ID collection.  Man
# that is a lot of code.
#
# Should I make symlinks to the published location?
#
# Maybe bash/dash/mksh/zsh should be invoked through a symlink?
# Every symlink is a shell runtime version, and it has an associated
# toolchain?

# Platform is ambient?
# _tmp/
#   shell-id/
#     bash/
#       HASH.txt
#       version.txt
#     dash/
#       HASH.txt
#       version.txt
#   host-id/
#     lisa/
#       HASH.txt
#       cpuinfo.txt

# ../benchmark-data/
#   shell-id/
#     bash-$HASH/
#     osh-$HASH/   # osh-cpython, osh-ovm?   osh-opy-ovm?  Too many dimensions.
#                # the other shells don't have this?
#     zsh-$HASH/
#   host-id/
#     lisa-$HASH/

proc _dump-if-exists {
  ### Copy file $1 to $2; silently a no-op when $1 does not exist
  local path=$1
  local out=$2
  if test -f $path {
    cat $path > $out
  }
}

#
# Shell ID
#

proc dump-shell-id {
  ### Write identifying info for shell binary $1 into directory $2
  local sh_path=$1
  local out_dir=$2

  if ! command -v $sh_path >/dev/null {
    die "dump-shell-id: Couldn't find $sh_path"
  }

  mkdir -p $out_dir

  echo $sh_path > $out_dir/sh-path.txt

  # Add extra repository info for osh.
  case (sh_path) {
    */osh* {
      local branch
      setvar branch = $(git rev-parse --abbrev-ref HEAD)
      echo $branch > $out_dir/git-branch.txt
      git rev-parse $branch > $out_dir/git-commit-hash.txt
      }
  }

  local sh_name
  setvar sh_name = $(basename $sh_path)

  # Each shell family records its version in a different way.
  case (sh_name) {
    bash|zsh|yash {
      $sh_path --version > $out_dir/version.txt
      }
    osh {
      case (sh_path) {
        *_bin/*/osh {
          # Doesn't support --version yet
          }
        * {
          $sh_path --version > $out_dir/osh-version.txt
          }
      }
      }
    # oils-for-unix|oils-for-unix.stripped)
    #  ;;
    dash|mksh {
      # These don't have version strings!
      dpkg -s $sh_name > $out_dir/dpkg-version.txt
      }

    # not a shell, but useful for benchmarks/compute
    python2 {
      # python prints its version on stderr, hence 2>
      $sh_path -V 2> $out_dir/version.txt
      }
    * {
      die "Invalid shell '$sh_name'"
      }
  }
}

proc _shell-id-hash {
  ### Print the attributes of a dumped shell dir that should feed its ID hash
  local src=$1

  local file

  # for shells and Python
  setvar file = "$src/version.txt"
  test -f $file && cat $file

  # Only hash the dimensions we want to keep
  setvar file = "$src/dpkg-version.txt"
  test -f $file && egrep '^Version' $file

  # Interpreter as CPython vs. OVM is what we care about, so
  # select 'Interpreter:' but not 'Interpreter version:'.
  # For example, the version is different on Ubuntu Bionic vs. Trusty, but we
  # ignore that.
  setvar file = "$src/osh-version.txt"
  test -f $file && egrep '^Oil version|^Interpreter:' $file

  # For OSH
  setvar file = "$src/git-commit-hash.txt"
  test -f $file && cat $file

  # Missing files are fine; don't let the last test's status leak out.
  return 0
}

proc publish-shell-id {
  ### Copy temp directory to hashed location
  # Echoes the 8-char short ID on stdout for use in command substitution.

  local src=$1  # e.g. _tmp/prov-tmp/osh
  local dest_base=${2:-../benchmark-data/shell-id}  # or _tmp/shell-id

  local sh_path sh_name
  read sh_path < $src/sh-path.txt
  setvar sh_name = $(basename $sh_path)

  local hash
  setvar hash = $(_shell-id-hash $src | md5sum)  # not secure, an identifier

  local id="${hash:0:8}"
  local dest="$dest_base/$sh_name-$id"

  mkdir -p $dest
  cp --no-target-directory --recursive $src/ $dest/

  echo $hash > $dest/HASH.txt

  # for .wwz file
  find-dir-html $dest

  log "Published shell ID to $dest"

  echo $id
}

#
# Platform ID
#

# Events that will change the env for a given machine:
# - kernel upgrade
# - distro upgrade

# How about ~/git/oilshell/benchmark-data/host-id/lisa-$HASH
# How to calculate the hash though?

proc dump-host-id {
  ### Write identifying info for this machine into $1
  local out_dir=${1:-_tmp/host-id/$(hostname)}

  mkdir -p $out_dir

  hostname > $out_dir/hostname.txt

  # does it make sense to do individual fields like -m?
  # avoid parsing?
  # We care about the kernel and the CPU architecture.
  # There is a lot of redundant information there.
  uname -m > $out_dir/machine.txt
  # machine
  do { uname --kernel-release 
    uname --kernel-version
  } > $out_dir/kernel.txt

  _dump-if-exists /etc/lsb-release $out_dir/lsb-release.txt

  # remove the cpu MHz field, which changes a lot
  grep -i -v 'cpu mhz' /proc/cpuinfo > $out_dir/cpuinfo.txt
  # mem info doesn't make a difference?  I guess it's just nice to check that
  # it's not swapping.  But shouldn't be part of the hash.

  grep '^MemTotal' /proc/meminfo > $out_dir/meminfo.txt

  #head $out_dir/* 1>&2  # don't write to stdout
}

# There is already concept of the triple?
# http://wiki.osdev.org/Target_Triplet
# It's not exactly the same as what we need here, but close.

proc _host-id-hash {
  ### Print the host attributes that should feed its ID hash
  local src=$1

  # Don't hash CPU or memory
  #cat $src/cpuinfo.txt
  #cat $src/hostname.txt  # e.g. lisa

  cat $src/machine.txt  # e.g. x86_64
  cat $src/kernel.txt

  # OS release info, when present (same pattern as _shell-id-hash)
  local os_file=$src/lsb-release.txt
  test -f $os_file && cat $os_file

  return 0
}

# Writes a short ID to stdout.
proc publish-host-id {
  ### Copy host info to a content-hashed dir; echo the 8-char short ID
  local src=$1  # e.g. _tmp/host-id/lisa
  local dest_base=${2:-../benchmark-data/host-id}

  local name
  setvar name = $(basename $src)

  local hash
  setvar hash = $(_host-id-hash $src | md5sum)  # not secure, an identifier

  local id="${hash:0:8}"
  local dest="$dest_base/$name-$id"

  mkdir -p $dest
  cp --no-target-directory --recursive $src/ $dest/

  echo $hash > $dest/HASH.txt

  # for .wwz file
  find-dir-html $dest

  log "Published host ID to $dest"

  echo $id
}

#
# Compilers
# 

proc dump-compiler-id {
  ### Write the version of compiler $1 into a per-compiler directory
  local cc=$1  # path to the compiler
  local out_dir=${2:-_tmp/compiler-id/$(basename $cc)}

  mkdir -p $out_dir

  # The whole case statement's output is redirected into version.txt below.
  case (cc) {
    */gcc {
      $cc --version
      # -v has more details, but they might be overkill.
      }
    */clang {
      $cc --version
      # -v has stuff we don't want
      }
  } > $out_dir/version.txt
}

proc _compiler-id-hash {
  ### Print the compiler version info that should feed its ID hash
  local src=$1

  # Drop clang's InstalledDir line, which varies by machine.
  # (Reads the file directly instead of piping it through cat.)
  grep -v InstalledDir $src/version.txt
}

# Writes a short ID to stdout.
proc publish-compiler-id {
  ### Copy compiler info to a content-hashed dir; echo the 8-char short ID
  local src=$1  # e.g. _tmp/compiler-id/clang
  local dest_base=${2:-../benchmark-data/compiler-id}

  local name=$(basename $src)
  local hash
  setvar hash = $(_compiler-id-hash $src | md5sum)  # not secure, an identifier

  local id="${hash:0:8}"
  local dest="$dest_base/$name-$id"

  mkdir -p $dest
  cp --no-target-directory --recursive $src/ $dest/

  echo $hash > $dest/HASH.txt

  log "Published compiler ID to $dest"

  echo $id
}

#
# Table Output
#

# Writes a table of host and shells to stdout.  Writes text files and
# calculates IDs for them as a side effect.
#
# The table can be passed to other benchmarks to ensure that their provenance
# is recorded.

proc shell-provenance-2 {
  ### Write to _tmp/provenance.{txt,tsv} and $out_dir/{shell,host-id}

  local maybe_host=$1  # if it exists, it overrides the host
  local job_id=$2
  local out_dir=$3
  shift 3

  # log "*** shell-provenance"

  mkdir -p _tmp/provenance

  local host_name
  if test -n $maybe_host {  # label is often 'no-host'
    setvar host_name = "$maybe_host"
  } else {
    setvar host_name = $(hostname)
  }

  log "*** $maybe_host $host_name $job_id $out_dir"

  local tmp_dir=_tmp/prov-tmp/$host_name
  dump-host-id $tmp_dir

  local host_hash
  setvar host_hash = $(publish-host-id $tmp_dir "$out_dir/host-id")
  local shell_hash

  local out_txt=_tmp/provenance.txt  # Legacy text file
  echo -n '' > $out_txt  # truncate, no header

  local out_tsv=_tmp/provenance.tsv
  tsv-row job_id host_name host_hash sh_path shell_hash > $out_tsv

  local i=0

  # Remaining args are shell paths; temp dirs are numbered because the same
  # shell name (osh) can appear more than once.
  for sh_path in "$@" {
    # There can be two different OSH

    setvar tmp_dir = "_tmp/prov-tmp/shell-$i"
    setvar i = $((i + 1))

    dump-shell-id $sh_path $tmp_dir

    # writes to ../benchmark-data or _tmp/provenance
    setvar shell_hash = $(publish-shell-id $tmp_dir "$out_dir/shell-id")

    # note: filter-provenance depends on $4 being $sh_path
    # APPEND to txt
    echo "$job_id $host_name $host_hash $sh_path $shell_hash" >> $out_txt

    tsv-row $job_id $host_name $host_hash $sh_path $shell_hash >> $out_tsv
  }

  log "Wrote $out_txt and $out_tsv"
}

proc compiler-provenance {
  ### Record host + compiler IDs; echo the provenance file path on stdout
  local job_id
  setvar job_id = $(print-job-id)

  local host
  setvar host = $(hostname)

  # Filename
  local out=_tmp/provenance/${host}.${job_id}.compiler-provenance.txt

  local tmp_dir=_tmp/host-id/$host
  dump-host-id $tmp_dir

  local host_hash
  setvar host_hash = $(publish-host-id $tmp_dir)

  local compiler_hash

  # gcc is assumed to be in the $PATH.
  for compiler_path in $(which gcc) $CLANG {
    local name=$(basename $compiler_path)

    setvar tmp_dir = "_tmp/compiler-id/$name"
    dump-compiler-id $compiler_path $tmp_dir

    setvar compiler_hash = $(publish-compiler-id $tmp_dir)

    # one row per compiler, redirected as a whole into $out below
    echo "$job_id $host $host_hash $compiler_path $compiler_hash"
  } > $out

  log "Wrote $out"

  # Return value used in command sub
  echo $out
}

proc out-param {
  ### Demo of an "out parameter": assigns 'returned' to the variable named $1
  # declare -n makes 'out' a nameref to the caller's variable.
  declare -n out=$1

  setvar out = 'returned'
}

# Only dispatch to a function when executed directly, not when sourced.
if test $(basename $0) = 'id.sh' {
  @ARGV
}

    (DONE benchmarks/id.sh)