Skip to content

Commit

Permalink
custom template delimiters (#62)
Browse files Browse the repository at this point in the history
* custom template delimiters

* added token removals
  • Loading branch information
unai-programmfabrik authored Sep 2, 2021
1 parent 7775886 commit c34388e
Show file tree
Hide file tree
Showing 18 changed files with 310 additions and 118 deletions.
47 changes: 46 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,6 @@ You can also set the log verbosity per single testcase. The greater verbosity wi
./apitest -d apitests --replace-host my.fancy.host:8989
```


# Manifest

The manifest is loaded as a **template**, so you can use variables, Go **range**, **if**, and other template constructs.
Expand Down Expand Up @@ -364,6 +363,51 @@ Manifest is loaded as **template**, so you can use variables, Go **range** and *
}
```

## Override template delimiters

Go template delimiters can be redefined in a single-line comment, using either of these syntaxes:

```
// template-delims: <delim_left> <delim_right>
/* template-delims: <delim_left> <delim_right> */
```

Examples:
```
// template-delims: /* */
/* template-delims: // // */
// template-delims: {{ }}
/* template-delims: {* *} */
```

**All external tests/requests/responses inherit those delimiters if not overridden in their template.**

## Remove template 'placeholders'

Go templates may break the proper JSONC format even when the delimiters are comments.
To keep the file parseable, placeholders can be used to fill in the missing parts; they are stripped before the template is parsed.
```
// template-remove-tokens: <token> [<token>]*
/* template-remove-tokens: <token> [<token>]* */
```

Example:
```
// template-delims: /* */
// template-remove-tokens: "delete_me"
{
"prop": /* datastore "something" */"delete_me"
}
```
Thanks to the `"delete_me"` string, this is valid JSONC.
However, that token is stripped before the template is parsed, so the template that is actually parsed is just:
```
{
"prop": /* datastore "something" */
}
```

**Unlike delimiters, external tests/requests/responses don't inherit those removals; they need to be specified per file.**

## Run tests in parallel

Expand All @@ -384,6 +428,7 @@ sense**
"response": "@simple.bin"
}
```

## Binary data comparison

The tool is able to do a comparison with a binary file. Here we take an MD5 hash of the file and then later compare
Expand Down
23 changes: 12 additions & 11 deletions api_testcase.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,6 @@ import (

"github.com/programmfabrik/apitest/pkg/lib/datastore"

"github.com/programmfabrik/apitest/pkg/lib/cjson"

"github.com/programmfabrik/apitest/pkg/lib/api"
"github.com/programmfabrik/apitest/pkg/lib/compare"
"github.com/programmfabrik/apitest/pkg/lib/report"
Expand Down Expand Up @@ -171,7 +169,7 @@ func (testCase *Case) checkCollectResponse(request api.Request, response api.Res
return -1, fmt.Errorf("error loading check response: %s", err)
}

jsonRespArray := util.JsonArray{}
var jsonRespArray util.JsonArray
switch t := loadedResponses.(type) {
case util.JsonArray:
jsonRespArray = t
Expand Down Expand Up @@ -231,6 +229,7 @@ func (testCase Case) executeRequest(counter int) (compare.CompareResult, api.Req
err = testCase.dataStore.SetMap(testCase.Store)
if err != nil {
err = fmt.Errorf("error setting datastore map:%s", err)
return responsesMatch, req, apiResp, err
}

//Do Request
Expand Down Expand Up @@ -313,7 +312,7 @@ func (testCase Case) LogResp(response api.Response) {
func (testCase Case) LogReq(req api.Request) {
errString := fmt.Sprintf("[REQUEST]:\n%s\n\n", limitLines(req.ToString(logCurl), Config.Apitest.Limit.Request))

if !testCase.ReverseTestResult && !testCase.ContinueOnFailure && testCase.LogNetwork != nil && *testCase.LogNetwork == false {
if !testCase.ReverseTestResult && !testCase.ContinueOnFailure && testCase.LogNetwork != nil && !*testCase.LogNetwork {
testCase.ReportElem.SaveToReportLogF(errString)
logrus.Debug(errString)
}
Expand Down Expand Up @@ -403,7 +402,7 @@ func (testCase Case) run() (bool, error) {
}

//break if timeout or we do not have a repeater
if timedOut := time.Now().Sub(startTime) > (time.Duration(testCase.Timeout) * time.Millisecond); timedOut && testCase.Timeout != -1 {
if timedOut := time.Since(startTime) > (time.Duration(testCase.Timeout) * time.Millisecond); timedOut && testCase.Timeout != -1 {
if timedOut && testCase.Timeout > 0 {
logrus.Warnf("Pull Timeout '%dms' exceeded", testCase.Timeout)
r.SaveToReportLogF("Pull Timeout '%dms' exceeded", testCase.Timeout)
Expand Down Expand Up @@ -502,23 +501,24 @@ func (testCase Case) loadRequestSerialization() (api.Request, error) {
spec api.Request
)

_, requestData, err := template.LoadManifestDataAsObject(*testCase.RequestData, testCase.manifestDir, testCase.loader)
reqLoader := testCase.loader
_, requestData, err := template.LoadManifestDataAsObject(*testCase.RequestData, testCase.manifestDir, reqLoader)
if err != nil {
return spec, fmt.Errorf("error loading request data: %s", err)
}
specBytes, err := json.Marshal(requestData)
if err != nil {
return spec, fmt.Errorf("error marshaling req: %s", err)
}
err = cjson.Unmarshal(specBytes, &spec)
err = util.Unmarshal(specBytes, &spec)
spec.ManifestDir = testCase.manifestDir
spec.DataStore = testCase.dataStore

if spec.ServerURL == "" {
spec.ServerURL = testCase.ServerURL
}
if len(spec.Headers) == 0 {
spec.Headers = make(map[string]*string, 0)
spec.Headers = make(map[string]*string)
}
for k, v := range testCase.standardHeader {
if spec.Headers[k] == nil {
Expand All @@ -527,7 +527,7 @@ func (testCase Case) loadRequestSerialization() (api.Request, error) {
}

if len(spec.HeaderFromStore) == 0 {
spec.HeaderFromStore = make(map[string]string, 0)
spec.HeaderFromStore = make(map[string]string)
}
for k, v := range testCase.standardHeaderFromStore {
if spec.HeaderFromStore[k] == "" {
Expand All @@ -543,7 +543,8 @@ func (testCase Case) loadResponseSerialization(genJSON interface{}) (api.Respons
spec api.ResponseSerialization
)

_, responseData, err := template.LoadManifestDataAsObject(genJSON, testCase.manifestDir, testCase.loader)
resLoader := testCase.loader
_, responseData, err := template.LoadManifestDataAsObject(genJSON, testCase.manifestDir, resLoader)
if err != nil {
return spec, fmt.Errorf("error loading response data: %s", err)
}
Expand All @@ -552,7 +553,7 @@ func (testCase Case) loadResponseSerialization(genJSON interface{}) (api.Respons
if err != nil {
return spec, fmt.Errorf("error marshaling res: %s", err)
}
err = cjson.Unmarshal(specBytes, &spec)
err = util.Unmarshal(specBytes, &spec)
if err != nil {
return spec, fmt.Errorf("error unmarshaling res: %s", err)
}
Expand Down
37 changes: 20 additions & 17 deletions api_testsuite.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@ import (
"github.com/sirupsen/logrus"

"github.com/programmfabrik/apitest/internal/httpproxy"
"github.com/programmfabrik/apitest/pkg/lib/cjson"
"github.com/programmfabrik/apitest/pkg/lib/datastore"
"github.com/programmfabrik/apitest/pkg/lib/filesystem"
"github.com/programmfabrik/apitest/pkg/lib/report"
Expand Down Expand Up @@ -51,6 +50,7 @@ type Suite struct {
httpServerDir string
idleConnsClosed chan struct{}
HTTPServerHost string
loader template.Loader
}

// NewTestSuite creates a new suite on which we execute our tests on. Normally this only gets call from within the apitest main command
Expand All @@ -76,15 +76,14 @@ func NewTestSuite(config TestToolConfig, manifestPath string, manifestDir string
datastore: datastore,
index: index,
}

manifest, err := suitePreload.loadManifest()
if err != nil {
err = fmt.Errorf("error loading manifest: %s", err)
suitePreload.reporterRoot.Failure = fmt.Sprintf("%s", err)
return &suitePreload, err
}

err = cjson.Unmarshal(manifest, &suitePreload)
err = util.Unmarshal(manifest, &suitePreload)
if err != nil {
err = fmt.Errorf("error unmarshaling manifest '%s': %s", manifestPath, err)
suitePreload.reporterRoot.Failure = fmt.Sprintf("%s", err)
Expand Down Expand Up @@ -137,13 +136,14 @@ func NewTestSuite(config TestToolConfig, manifestPath string, manifestDir string
}
// fmt.Printf("%s", string(manifest))
// We unmarshall the final manifest into the final working suite
err = cjson.Unmarshal(manifest, &suite)
err = util.Unmarshal(manifest, &suite)
if err != nil {
err = fmt.Errorf("error unmarshaling manifest '%s': %s", manifestPath, err)
suite.reporterRoot.Failure = fmt.Sprintf("%s", err)
return &suite, err
}
suite.HTTPServerHost = suitePreload.HTTPServerHost
suite.loader = suitePreload.loader

//Append suite manifest path to name, so we know in an automatic setup where the test is loaded from
suite.Name = fmt.Sprintf("%s (%s)", suite.Name, manifestPath)
Expand Down Expand Up @@ -173,7 +173,7 @@ func (ats *Suite) Run() bool {
success := true
for k, v := range ats.Tests {
child := r.NewChild(strconv.Itoa(k))
sTestSuccess := ats.parseAndRunTest(v, ats.manifestDir, ats.manifestPath, k, false, child)
sTestSuccess := ats.parseAndRunTest(v, ats.manifestDir, ats.manifestPath, k, false, child, ats.loader)
child.Leave(sTestSuccess)
if !sTestSuccess {
success = false
Expand Down Expand Up @@ -207,10 +207,11 @@ type TestContainer struct {
Path string
}

func (ats *Suite) parseAndRunTest(v interface{}, manifestDir, testFilePath string, k int, runParallel bool, r *report.ReportElement) bool {
func (ats *Suite) parseAndRunTest(v interface{}, manifestDir, testFilePath string, k int, runParallel bool, r *report.ReportElement, rootLoader template.Loader) bool {
//Init variables
// logrus.Warnf("Test %s, Prev delimiters: %#v", testFilePath, rootLoader.Delimiters)
loader := template.NewLoader(ats.datastore)

loader.Delimiters = rootLoader.Delimiters
loader.HTTPServerHost = ats.HTTPServerHost
serverURL, err := url.Parse(ats.Config.ServerURL)
if err != nil {
Expand Down Expand Up @@ -240,7 +241,7 @@ func (ats *Suite) parseAndRunTest(v interface{}, manifestDir, testFilePath strin

//Try to directly unmarshal the manifest into testcase array
var testCases []json.RawMessage
err = cjson.Unmarshal(testObj, &testCases)
err = util.Unmarshal(testObj, &testCases)
if err == nil {
d := 1
if isParallelPathSpec || runParallel {
Expand Down Expand Up @@ -279,28 +280,28 @@ func (ats *Suite) parseAndRunTest(v interface{}, manifestDir, testFilePath strin
// If objects are different, we did have a Go template, recurse one level deep
if string(requestBytes) != string(testObj) {
return ats.parseAndRunTest([]byte(requestBytes), filepath.Join(manifestDir, dir),
testFilePath, k, isParallelPathSpec, r)
testFilePath, k, isParallelPathSpec, r, loader)
}

// Its a JSON at this point, assign and proceed to parse
testObj = requestBytes

var singleTest json.RawMessage
err = cjson.Unmarshal(testObj, &singleTest)
err = util.Unmarshal(testObj, &singleTest)
if err == nil {

//Check if is @ and if so load the test
if util.IsPathSpec(testObj) {
var sS string

err := cjson.Unmarshal(testObj, &sS)
err := util.Unmarshal(testObj, &sS)
if err != nil {
r.SaveToReportLog(err.Error())
logrus.Error(fmt.Errorf("can not unmarshal (%s): %s", testFilePath, err))
return false
}

return ats.parseAndRunTest(sS, filepath.Join(manifestDir, dir), testFilePath, k, isParallelPathSpec, r)
return ats.parseAndRunTest(sS, filepath.Join(manifestDir, dir), testFilePath, k, isParallelPathSpec, r, template.Loader{})
} else {
return ats.runSingleTest(TestContainer{CaseByte: testObj, Path: filepath.Join(manifestDir, dir)}, r, testFilePath, loader, k, runParallel)
}
Expand All @@ -319,7 +320,7 @@ func (ats *Suite) runSingleTest(tc TestContainer, r *report.ReportElement, testF
r.SetName(testFilePath)

var test Case
jErr := cjson.Unmarshal(tc.CaseByte, &test)
jErr := util.Unmarshal(tc.CaseByte, &test)
if jErr != nil {

r.SaveToReportLog(jErr.Error())
Expand Down Expand Up @@ -348,7 +349,6 @@ func (ats *Suite) runSingleTest(tc TestContainer, r *report.ReportElement, testF
if test.LogShort == nil {
test.LogShort = &ats.Config.LogShort
}

if test.ServerURL == "" {
test.ServerURL = ats.Config.ServerURL
}
Expand Down Expand Up @@ -384,7 +384,10 @@ func (ats *Suite) loadManifest() ([]byte, error) {
if err != nil {
return res, fmt.Errorf("error loading manifest (%s): %s", ats.manifestPath, err)
}
return loader.Render(manifestTmpl, ats.manifestDir, nil)

b, err := loader.Render(manifestTmpl, ats.manifestDir, nil)
ats.loader = loader
return b, err
}

func testGoRoutine(k, ki int, v json.RawMessage, ats *Suite, testFilePath, manifestDir, dir string, r *report.ReportElement, loader template.Loader, waitCh, succCh chan bool, runParallel bool) {
Expand All @@ -394,14 +397,14 @@ func testGoRoutine(k, ki int, v json.RawMessage, ats *Suite, testFilePath, manif
switch util.IsPathSpec(v) {
case true:
var sS string
err := cjson.Unmarshal(v, &sS)
err := util.Unmarshal(v, &sS)
if err != nil {
r.SaveToReportLog(err.Error())
logrus.Error(fmt.Errorf("can not unmarshal (%s): %s", testFilePath, err))
success = false
break
}
success = ats.parseAndRunTest(sS, filepath.Join(manifestDir, dir), testFilePath, k+ki, runParallel, r)
success = ats.parseAndRunTest(sS, filepath.Join(manifestDir, dir), testFilePath, k+ki, runParallel, r, loader)
default:
success = ats.runSingleTest(TestContainer{CaseByte: v, Path: filepath.Join(manifestDir, dir)},
r, testFilePath, loader, ki, runParallel)
Expand Down
21 changes: 21 additions & 0 deletions api_testsuite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,5 +24,26 @@ func TestLoadManifest(t *testing.T) {
if string(res) != `{"testload": "loaded"}` {
t.Errorf(`Exp '{"testload": "loaded"}' != '%s' Got`, res)
}
}

// TestLoadManifestCustomDelimiters verifies that a manifest can override the
// Go template delimiters via a "// template-delims:" single-line comment and
// declare literal tokens to strip via "// template-remove-tokens:".
func TestLoadManifestCustomDelimiters(t *testing.T) {
// Use an in-memory filesystem so the test touches no real files.
filesystem.Fs = afero.NewMemMapFs()

// External file referenced by the manifest template below.
afero.WriteFile(filesystem.Fs, "externalFile", []byte(`{"load":{"me":"loaded"}}`), 0644)

// The manifest switches the template delimiters to "## ##" and declares two
// removal tokens; only "...." actually appears in the manifest body.
afero.WriteFile(filesystem.Fs, "testManifest.json", []byte(`// template-delims: ## ##
// template-remove-tokens: "<placeholder>" "...."
{"testload": ## file "externalFile" | qjson "load.me" ##}"...."`), 0644)

s := Suite{manifestPath: "testManifest.json"}
res, err := s.loadManifest()
if err != nil {
t.Fatal(err)
}

// Expected output: the delimiter/token comment lines are consumed (leaving a
// leading newline), the "## ... ##" expression is rendered from externalFile,
// and the trailing "...." token is removed.
if string(res) != `
{"testload": "loaded"}` {
t.Errorf(`Exp '{"testload": "loaded"}' != '%s' Got`, res)
}
}
Loading

0 comments on commit c34388e

Please sign in to comment.