From 60b1d9d3ecee6e57d7e55146e9babcaf39101ba1 Mon Sep 17 00:00:00 2001 From: Aaron Weaver <aaronweaver@users.noreply.github.com> Date: Wed, 24 Jan 2018 16:32:43 -0500 Subject: [PATCH] Initial commit --- .travis.yml | 34 + README.md | 1 + build.sh | 6 + build/build-dockers.sh | 22 + build/combine-yaml.py | 66 ++ build/docker-hub.sh | 22 + controller/.DS_Store | Bin 0 -> 6148 bytes controller/appsec.pipeline | 49 ++ controller/master.yaml | 145 ++++ controller/secpipeline-config.yaml | 765 ++++++++++++++++++++ controller/tool-config-template.config | 97 +++ dockers/appsecpipeline/docker-compose.yml | 38 + dockers/appsecpipeline/launchenv.sh | 6 + dockers/base/dockerfile-base | 30 + dockers/base/dockerfile-base-tools | 61 ++ dockers/base/dockerfile-node | 28 + dockers/base/dockerfile-ruby | 34 + dockers/base/dockerfile-sast | 45 ++ dockers/base/dockerfile-zap | 38 + dockers/base/setupdocker.sh | 11 + setup.bash | 80 +++ tools/.DS_Store | Bin 0 -> 18436 bytes tools/README.md | 265 +++++++ tools/appspider/AppSpider.py | 331 +++++++++ tools/appspider/PyAppSpider.py | 826 ++++++++++++++++++++++ tools/appspider/config.yaml | 60 ++ tools/appspider/parser.py | 123 ++++ tools/arachni/config.yaml | 54 ++ tools/arachni/parser.py | 123 ++++ tools/bandit/config.yaml | 31 + tools/bandit/parser.py | 123 ++++ tools/brakeman/config.yaml | 29 + tools/checkmarx/.DS_Store | Bin 0 -> 6148 bytes tools/checkmarx/PyCheckmarx.py | 512 ++++++++++++++ tools/checkmarx/ScanProject.py | 25 + tools/checkmarx/config.yaml | 61 ++ tools/checkmarx/requirements.txt | 19 + tools/cloc/config.yaml | 26 + tools/defectdojo/.DS_Store | Bin 0 -> 8196 bytes tools/defectdojo/config.yaml | 53 ++ tools/defectdojo/dojo_ci_cd.py | 344 +++++++++ tools/dependency-check/config.yaml | 35 + tools/git/.DS_Store | Bin 0 -> 6148 bytes tools/git/config.yaml | 33 + tools/git/git.sh | 12 + tools/health.py | 24 + tools/junit.py | 48 ++ tools/launch.py | 318 +++++++++ tools/nikto/config.yaml | 37 + tools/nikto/parser.py | 123 ++++ tools/nmap/config.yaml | 58 ++ tools/prepenv/.DS_Store | Bin 0 -> 8196 bytes tools/prepenv/config.yaml | 51 ++ tools/prepenv/prep_run.py | 343 +++++++++ tools/retirejs/config.yaml | 28 + tools/snyk/README.md | 12 + tools/snyk/config.yaml | 33 + tools/spotbugs/config.yaml | 28 + tools/ssllabs/config.yaml | 31 + tools/ssllabs/parser.py | 144 ++++ tools/tenableio/.DS_Store | Bin 0 -> 6148 bytes tools/tenableio/config.yaml | 42 ++ tools/tenableio/tenablescan.py | 40 ++ tools/wpscan/config.yaml | 29 + tools/wpscan/parser.py | 186 +++++ tools/zap/config.yaml | 46 ++ tools/zap/zap-baseline.py | 438 ++++++++++++ 67 files changed, 6722 insertions(+) create mode 100644 .travis.yml create mode 100644 README.md create mode 100644 build.sh create mode 100644 build/build-dockers.sh create mode 100644 build/combine-yaml.py create mode 100644 build/docker-hub.sh create mode 100644 controller/.DS_Store create mode 100644 controller/appsec.pipeline create mode 100644 controller/master.yaml create mode 100644 controller/secpipeline-config.yaml create mode 100644 controller/tool-config-template.config create mode 100644 dockers/appsecpipeline/docker-compose.yml create mode 100644 dockers/appsecpipeline/launchenv.sh create mode 100644 dockers/base/dockerfile-base create mode 100644 dockers/base/dockerfile-base-tools create mode 100644 dockers/base/dockerfile-node create mode 100644 dockers/base/dockerfile-ruby create mode 100644 dockers/base/dockerfile-sast create mode 100644 dockers/base/dockerfile-zap create mode 100644 
dockers/base/setupdocker.sh create mode 100644 setup.bash create mode 100644 tools/.DS_Store create mode 100644 tools/README.md create mode 100644 tools/appspider/AppSpider.py create mode 100644 tools/appspider/PyAppSpider.py create mode 100644 tools/appspider/config.yaml create mode 100644 tools/appspider/parser.py create mode 100644 tools/arachni/config.yaml create mode 100644 tools/arachni/parser.py create mode 100644 tools/bandit/config.yaml create mode 100644 tools/bandit/parser.py create mode 100644 tools/brakeman/config.yaml create mode 100644 tools/checkmarx/.DS_Store create mode 100644 tools/checkmarx/PyCheckmarx.py create mode 100644 tools/checkmarx/ScanProject.py create mode 100644 tools/checkmarx/config.yaml create mode 100644 tools/checkmarx/requirements.txt create mode 100644 tools/cloc/config.yaml create mode 100644 tools/defectdojo/.DS_Store create mode 100644 tools/defectdojo/config.yaml create mode 100644 tools/defectdojo/dojo_ci_cd.py create mode 100644 tools/dependency-check/config.yaml create mode 100644 tools/git/.DS_Store create mode 100644 tools/git/config.yaml create mode 100644 tools/git/git.sh create mode 100644 tools/health.py create mode 100755 tools/junit.py create mode 100644 tools/launch.py create mode 100644 tools/nikto/config.yaml create mode 100644 tools/nikto/parser.py create mode 100644 tools/nmap/config.yaml create mode 100644 tools/prepenv/.DS_Store create mode 100644 tools/prepenv/config.yaml create mode 100644 tools/prepenv/prep_run.py create mode 100644 tools/retirejs/config.yaml create mode 100644 tools/snyk/README.md create mode 100644 tools/snyk/config.yaml create mode 100644 tools/spotbugs/config.yaml create mode 100644 tools/ssllabs/config.yaml create mode 100644 tools/ssllabs/parser.py create mode 100644 tools/tenableio/.DS_Store create mode 100644 tools/tenableio/config.yaml create mode 100644 tools/tenableio/tenablescan.py create mode 100644 tools/wpscan/config.yaml create mode 100644 tools/wpscan/parser.py create mode 100644 tools/zap/config.yaml create mode 100644 tools/zap/zap-baseline.py diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..15a6734 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,34 @@ +sudo: required +language: python +install: true + +services: + - docker + +env: + global: + - VERSION=1.${TRAVIS_BUILD_NUMBER} + matrix: + - PIPELINE_BUILD='base' + DOCKER_FILE='base/dockerfile-base' + REPO='appsecpipeline/base' + - PIPELINE_BUILD='basetools' + DOCKER_FILE='base/dockerfile-base-tools' + REPO='appsecpipeline/base-tools' + - PIPELINE_BUILD=sast + DOCKER_FILE='base/dockerfile-sast' + REPO='appsecpipeline/sast' + - PIPELINE_BUILD=node + DOCKER_FILE='base/dockerfile-node' + REPO='appsecpipeline/node' + - PIPELINE_BUILD=ruby + DOCKER_FILE='base/dockerfile-ruby' + REPO='appsecpipeline/ruby' + - PIPELINE_BUILD=zap + DOCKER_FILE='base/dockerfile-zap' + REPO='appsecpipeline/zap' + +script: + - | + echo "Running PipelineBuild=$PIPELINE_BUILD" + bash build/docker-hub.sh $TRAVIS_BRANCH $REPO $VERSION $DOCKER_FILE diff --git a/README.md b/README.md new file mode 100644 index 0000000..1d10220 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# AppSecPipeline diff --git a/build.sh b/build.sh new file mode 100644 index 0000000..1c00b45 --- /dev/null +++ b/build.sh @@ -0,0 +1,6 @@ +echo "Building Jenkins Jobs" +sh jenkins.sh +echo +echo "Creating global tool yaml" +python build/combine-yaml.py +echo "Complete\n" diff --git a/build/build-dockers.sh b/build/build-dockers.sh new file mode 100644 index 0000000..a737c2f --- /dev/null 
+++ b/build/build-dockers.sh @@ -0,0 +1,22 @@ +echo "Keeping it tidy." +docker rmi $(docker images | grep "none" | awk '/ / { print $3 }') +#docker volume rm $(docker volume ls -qf dangling=true) +#docker rmi $(docker images -q) +echo "Building dockers" +#--no-cache +docker build -f dockers/base/dockerfile-base . -t appsecpipeline/base +docker build -f dockers/base/dockerfile-base-tools . -t appsecpipeline/base-tools +docker build -f dockers/base/dockerfile-sast . -t appsecpipeline/sast +docker build -f dockers/base/dockerfile-node . -t appsecpipeline/node +docker build -f dockers/base/dockerfile-ruby . -t appsecpipeline/ruby +docker build -f dockers/base/dockerfile-zap . -t appsecpipeline/zap +docker build -f pipelines/jenkins/jenkins-local-dockerfile . -t appsecpipeline/jenkins + +echo +echo "Command Shortcuts" +echo 'docker run --rm -ti appsecpipeline/base /bin/bash' +echo 'docker run --rm -ti appsecpipeline/base-tools /bin/bash' +echo 'docker run --rm -ti appsecpipeline/sast /bin/bash' +echo 'docker run --rm -ti appsecpipeline/node /bin/bash' +echo 'docker run --rm -ti appsecpipeline/ruby /bin/bash' +echo 'docker run --rm -ti appsecpipeline/zap /bin/bash' diff --git a/build/combine-yaml.py b/build/combine-yaml.py new file mode 100644 index 0000000..f253169 --- /dev/null +++ b/build/combine-yaml.py @@ -0,0 +1,66 @@ +import os +import yaml + +secPipelineFile = "controller/secpipeline-config.yaml" + +def createSecPipeLine(): + #Re-create the pipeline config file, guarding against a missing file on first run + if os.path.exists(secPipelineFile): os.remove(secPipelineFile) + + for subdir, dirs, files in os.walk("tools"): + for file in files: + if file.lower().endswith("yaml"): + yamlFile = os.path.join(subdir, file) + + #Read tool YAML + with open(yamlFile, 'r') as toolYaml: + yamlContent = toolYaml.read() + + #Write to secpipeline-config.yaml + with open(secPipelineFile, 'a+') as configFile: + configFile.write(yamlContent) + +def quoted_presenter(dumper, data): + return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"') + +yaml.add_representer(str, quoted_presenter) + +def readYAML(): + #Read tool YAML + with open(secPipelineFile, 'r') as stream: + try: + tools = yaml.safe_load(stream) + data = {} + parameters = {} + parameters_details = {} + + for tool in tools: + toolParms = tools[tool]["parameters"] + parameters_key = {} + for parameter in toolParms: + if toolParms[parameter]["type"] == "config": + parameters_key["type"] = toolParms[parameter]["type"] + parameters_key["data_type"] = toolParms[parameter]["data_type"] + parameters_key["description"] = toolParms[parameter]["description"] + parameters_key["value"] = '{replace-me}' + parameters_details[parameter] = parameters_key + parameters_key = {} + + if parameters_details: + parameters["parameters"] = parameters_details + data[tool] = parameters + + parameters = {} + parameters_details = {} + + yamlLoc = "controller/tool-config-template.config" + with open(yamlLoc, 'w') as outfile: + yaml.dump(data, outfile, default_flow_style=False) + + except yaml.YAMLError as exc: + print(exc) + + +createSecPipeLine() +readYAML() +print("Complete!") diff --git a/build/docker-hub.sh b/build/docker-hub.sh new file mode 100644 index 0000000..dfa0208 --- /dev/null +++ b/build/docker-hub.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -ev + +TRAVIS_BRANCH=$1 +REPO=$2 +VERSION=$3 +DOCKER_FILE=$4 + +if [ "$TRAVIS_BRANCH" == "master" ]; then + TAG="latest"; +else + TAG=$TRAVIS_BRANCH; +fi + +docker build -f dockers/$DOCKER_FILE .
-t $REPO + +docker tag $REPO $REPO:$TAG +docker tag $REPO $REPO:$VERSION +if [ "$TRAVIS_BRANCH" == "master" ] && [ "$DOCKER_USER" != "" ] && [ "$DOCKER_PASS" != "" ]; then + docker login -u "$DOCKER_USER" -p "$DOCKER_PASS"; + docker push $REPO ; +fi diff --git a/controller/appsec.pipeline b/controller/appsec.pipeline new file mode 100644 index 0000000..b4f7e55 --- /dev/null +++ b/controller/appsec.pipeline @@ -0,0 +1,49 @@ +version: AppSecPipeline 0.5.0 + +#Sample appsec.pipeline file that could reside in an app repo +profiles: + production: + - tool: "nmap" + profile: "all" + - tool: "zap" + profile: "quick" + - tool: "ssllabs" + profile: "all" + - tool: "defectdojo" + profile: "all" + + dynamic: + - tool: "nikto" + profile: "tuned" + - tool: "arachni" + profile: "xss" + - tool: "defectdojo" + profile: "all" + + static: + - tool: "cloc" + profile: "all" + - tool: "bandit" + profile: "tuned" + - tool: "brakeman" + profile: "tuned" + - tool: "retirejs" + profile: "all" + finding-severity: #Only import issues from retireJs with a severity of Critical + - "Critical" + - tool: "snyk" + profile: "all" + - tool: "dependency-check" + profile: "all" + - tool: "defectdojo" + profile: "all" + + analyze-code: + - tool: "cloc" + profile: "all" + +#Define which profile to run based off of a code checkin +deployment: + master: fast + devel: sast + pre-prod: standard diff --git a/controller/master.yaml b/controller/master.yaml new file mode 100644 index 0000000..174290a --- /dev/null +++ b/controller/master.yaml @@ -0,0 +1,145 @@ +version: AppSecPipeline 0.6.0 + +# Global configuration settings +global: + min-severity: low + max-tool-run: 720 #Maximum time to run a tool before terminating the container, specified in minutes + max-parallel: 3 #Maximum number of concurrent docker containers to run per Pipeline + max-dynamic: 1 #Maximum number of dynamic containers to run at once + +#Profile definition of what tools to run for a particular application +profiles: + sourcecode: + pipeline: + - tool: "git" + tool-profile: "tags" + on-failure: "fail" #continue / fail / tool?
+ - tool: "cloc" + tool-profile: "all" + on-failure: + - tool: "checkmarx" + tool-profile: "all" + - tool: "bandit" + tool-profile: "tuned" + - tool: "brakeman" + tool-profile: "tuned" + - tool: "retirejs" + tool-profile: "all" + - tool: "defectdojo" + tool-profile: "all" + + gitcloc: + pipeline: + - tool: "git" + tool-profile: "clone" + - tool: "cloc" + tool-profile: "all" + + standard: + pipeline: + - tool: "cloc" + tool-profile: "all" + - tool: "nikto" + tool-profile: "fast" + - tool: "arachni" + tool-profile: "xss" + - tool: "bandit" + tool-profile: "tuned" + - tool: "brakeman" + tool-profile: "tuned" + - tool: "retirejs" + tool-profile: "all" + - tool: "defectdojo" + tool-profile: "all" + + production: + pipeline: + - tool: "zap" + tool-profile: "quick" + - tool: "nmap" + tool-profile: "quick" + - tool: "ssllabs" + tool-profile: "all" + startup: + tool: prepenv + tool-profile: all + runevery: + tool: defectdojo + tool-profile: engagement + final: + tool: defectdojo + tool-profile: close_engagement + + ssllabs: + pipeline: + - tool: "ssllabs" + tool-profile: "all" + + tenableio: + pipeline: + - tool: "tenableio" + tool-profile: "all" + + wpscan: + pipeline: + - tool: "wpscan" + tool-profile: "all_enumeration" + + nmap: + pipeline: + - tool: "nmap" + tool-profile: "quick" + + defectdojonmap: + pipeline: + - tool: "nmap" + tool-profile: "quick" + - tool: "defectdojo" + tool-profile: "all" + + appspider: + pipeline: + - tool: "appspider" + tool-profile: "all" + + defectdojo: + pipeline: + - tool: "defectdojo" + tool-profile: "all" + + dynamic: + pipeline: + - tool: "nikto" + tool-profile: "tuned" + - tool: "arachni" + tool-profile: "xss" + - tool: "defectdojo" + tool-profile: "all" + + static: + pipeline: + - tool: "cloc" + tool-profile: "all" + - tool: "bandit" + tool-profile: "tuned" + - tool: "brakeman" + tool-profile: "tuned" + - tool: "retirejs" + tool-profile: "all" + - tool: "snyk" + tool-profile: "all" + - tool: "dependency-check" + tool-profile: "all" + - tool: "defectdojo" + tool-profile: "all" + + analyze-code: + pipeline: + - tool: "cloc" + tool-profile: "all" + +#Define which profile to run based off of a code checkin +deployment: + master: fast + devel: sast + pre-prod: standard diff --git a/controller/secpipeline-config.yaml b/controller/secpipeline-config.yaml new file mode 100644 index 0000000..d7df065 --- /dev/null +++ b/controller/secpipeline-config.yaml @@ -0,0 +1,765 @@ +appspider: + version: AppSecPipeline 0.5.0 + tool-version: 3.8 + tags: + - "Dynamic Scanner" + type: "dynamic" + scan_type: "web" + icon-sm: + icon-lg: + description: "AppSpider automatically finds vulnerabilities across a wide range of applications from the relatively simple to the most complex—and it includes unique capabilities that enable teams to automate more of the security testing program across the entire software development lifecycle, from creation through production." + url: https://www.rapid7.com/products/appspider/ + documentation: https://appspider.help.rapid7.com/docs/ + docker: "appsecpipeline/base:latest" + parameters: + APPSPIDER_RUN_SCAN_POLL: + type: runtime + data_type: bool + description: "Wait for the scan to complete and specify the AppSpider profile." + APPSPIDER_CLIENT: + type: runtime + data_type: string + description: "AppSpider Client to use for connecting to the API." + APPSPIDER_URL: + type: config + data_type: url + description: "AppSpider Enterprise URL." 
+  APPSPIDER_USERNAME: +    type: config +    data_type: username +    description: "AppSpider API Username" +  APPSPIDER_PASSWORD: +    type: config +    data_type: password +    description: "AppSpider API Password" +  APPSPIDER_ADMIN_USERNAME: +    type: config +    data_type: username +    description: "AppSpider Admin API Username" +  APPSPIDER_ADMIN_PASSWORD: +    type: config +    data_type: password +    description: "AppSpider Admin API Password" +  commands: +    pre: +    exec: "python /usr/bin/appsecpipeline/tools/appspider/AppSpider.py --url $APPSPIDER_URL --username $APPSPIDER_USERNAME --password $APPSPIDER_PASSWORD --admin-username $APPSPIDER_ADMIN_USERNAME --admin-password $APPSPIDER_ADMIN_PASSWORD --client $APPSPIDER_CLIENT" +    shell: False +    report: "--output-file='{reportname}'" +    reportname: "VulnerabilitiesSummary.xml" +    post: +    junit: +  profiles: +    all: "--run-scan-poll $APPSPIDER_RUN_SCAN_POLL" +    active: "--checks=active/*" +    passive: "--checks=passive/*" +    fast: "--audit-forms --checks=xss --scope-page-limit=1" +    file_upload: "--checks=form_upload" +    xss: "--checks=xss" +    remote: "--checks=file_inclusion" +    command_exec: "--checks=os_cmd_injection" +    sqli: "--checks=sql_injection" +arachni: +  version: AppSecPipeline 0.5.0 +  tool-version: +  tags: +    - "Dynamic Scanner" +  type: "dynamic" +  scan_type: "web" +  icon-sm: +  icon-lg: +  description: "Arachni is a feature-full, modular, high-performance Ruby framework aimed towards helping penetration testers and administrators evaluate the security of modern web applications." +  url: http://www.arachni-scanner.com/ +  documentation: https://github.com/Arachni/arachni/wiki/Command-line-user-interface +  docker: "appsecpipeline/base-tools:latest" +  parameters: +    URL: +      type: runtime +      data_type: url +      description: "URL of host to scan." +    LOGIN_URL: +      type: runtime +      data_type: url +      description: "Login URL of host to scan." +    LOGIN_PARMS: +      type: runtime +      data_type: string +      description: "Login parameters in the format of username=user&password=password." +    LOGIN_SUCCESS: +      type: runtime +      data_type: string +      description: "Successful login text to match on." +    LOGIN_LOGOUT_PATTERN: +      type: runtime +      data_type: string +      description: "Logout text to avoid." +  commands: +    pre: +    exec: "arachni $URL" +    shell: True +    report: "--report-save-path={reportname}" +    reportname: "{timestamp}.afr" +    post: "arachni_reporter {reportname} --reporter=json:outfile={reportname}.json" +    junit: +  credentials: +    simple: --plugin=autologin:url=$LOGIN_URL,parameters="$LOGIN_PARMS",check="$LOGIN_SUCCESS" --scope-exclude-pattern=$LOGIN_LOGOUT_PATTERN +  profiles: +    all: "" +    active: "--checks=active/*" +    passive: "--checks=passive/*" +    fast: "--audit-forms --checks=xss --scope-page-limit=1" +    file_upload: "--checks=form_upload" +    xss: "--checks=xss" +    remote: "--checks=file_inclusion" +    command_exec: "--checks=os_cmd_injection" +    sqli: "--checks=sql_injection" +bandit: +  version: AppSecPipeline 0.5.0 +  tool-version: +  name: bandit +  tags: +    - "Static Code Analyzer" +  type: "static" +  description: "Bandit is a tool designed to find common security issues in Python code. To do this Bandit processes each file, builds an AST from it, and runs appropriate plugins against the AST nodes. Once Bandit has finished scanning all the files it generates a report."
+ docker: "appsecpipeline/base-tools:latest" + url: https://wiki.openstack.org/wiki/Security/Projects/Bandit + documentation: https://docs.openstack.org/bandit/latest/index.html + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." + commands: + pre: + exec: "bandit" + shell: True + report: "-f csv -o {reportname}" + reportname: "{timestamp}.csv" + post: "python /usr/bin/appsecpipeline/tools/bandit/parser.py -f {reportname}" + junit: "junit.py -f {reportname} -t bandit" + languages: + - "python" + profiles: + #Runs the full bandit scan + all: "-r $LOC" + #Only the issues that are the highest severity and the highest confidence + tuned: "-lll -iii -r $LOC" +brakeman: + version: AppSecPipeline 0.5.0 + tags: + - "Static Code Analyzer" + type: "static" + description: "Brakeman is an open source vulnerability scanner specifically designed for Ruby on Rails applications. It statically analyzes Rails application code to find security issues at any stage of development." + docker: "appsecpipeline/ruby:latest" + url: https://brakemanscanner.org/ + documentation: https://brakemanscanner.org/docs/ + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." + commands: + pre: + exec: "brakeman -p $LOC --no-pager" + shell: False + post: + report: "-o {reportname}" + reportname: "{timestamp}.json" + junit: + languages: + - "ruby" + profiles: + #There are some checks which are not run by default. To run all checks, use + all: "-A" + #If Brakeman is running a bit slow, try + tuned: "--faster" +checkmarx: + version: AppSecPipeline 0.5.0 + tags: + - "Static Scanner" + type: "static" + name: "checkmarx" + icon-sm: + icon-lg: + description: "Checkmarx is a source code analysis product that allows organization to scan uncompiled code and identify vulnerabilities." + url: https://www.checkmarx.com/technology/static-code-analysis-sca/ + documentation: https://checkmarx.atlassian.net/wiki/spaces/KC/overview + docker: appsecpipeline/base + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." + CHECKMARX_PROJECT: + type: runtime + data_type: int + description: "Checkmarx Project ID" + CHECKMARX_URL: + type: config + data_type: url + description: "Checkmarx web URL." + CHECKMARX_USERNAME: + type: config + data_type: username + description: "Checkmarx username." + CHECKMARX_PASSWORD: + type: config + data_type: password + description: "Checkmarx password." + commands: + pre: + exec: "python /usr/bin/appsecpipeline/tools/checkmarx/ScanProject.py --url $CHECKMARX_URL --username $CHECKMARX_USERNAME --password $CHECKMARX_PASSWORD --project=$CHECKMARX_PROJECT --source=$LOC" + shell: False + report: "--report {reportname}" + reportname: "{timestamp}.xml" + post: + junit: + languages: + - "ruby" + - "java" + - "c#.net" + - "php" + - "python" + - "groovy" + - "android" + - "ios" + - "html5" + - "c++" + - "vb.net" + - "vb" + - "pl/sql" + - "perl" + - "apex" + - "scala" + - "swift" + profiles: + all: " " +cloc: + tags: + - "Static Code Analyzer" + type: "code-analyzer" + version: AppSecPipeline 0.5.0 + icon-sm: + icon-lg: + description: "cloc counts blank lines, comment lines, and physical lines of source code in many programming languages." + url: https://github.com/AlDanial/cloc + documentation: https://github.com/AlDanial/cloc + docker: "appsecpipeline/base-tools" + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." 
+  commands: +    parameters: "LOC : File or folder location of source code to examine. LOC=/temp/code" +    pre: +    exec: "cloc" +    report: "--report-file={reportname}" +    reportname: "languages.json" +    post: "cat {reportname}" +    junit: +  profiles: +    all: "$LOC --json" +defectdojo: +  version: AppSecPipeline 0.5.0 +  name: DefectDojo +  tags: +    - "Vulnerability Management" +    - "Report Consumer" +  type: "collector" +  description: "DefectDojo is a security program and vulnerability management tool. DefectDojo allows you to manage your application security program, maintain product and application information, schedule scans, triage vulnerabilities and push findings into defect trackers. Consolidate your findings into one source of truth with DefectDojo." +  docker: "appsecpipeline/base-tools" +  url: https://github.com/OWASP/django-DefectDojo +  documentation: http://defectdojo.readthedocs.io/en/latest/ +  parameters: +    DOJO_ENGAGEMENT_ID: +      type: runtime +      data_type: int +      description: "Engagement id that exists in DefectDojo." +    DOJO_DIR: +      type: runtime +      data_type: string +      description: "Directory where the reports reside for importing into DefectDojo." +    BUILD_ID: +      type: runtime +      data_type: string +      description: "Build ID from upstream CI/CD." +    DOJO_API_KEY: +      type: config +      data_type: key +      description: "Dojo API key." +    DOJO_HOST: +      type: config +      data_type: string +      description: "DefectDojo host." +    DOJO_PRODUCT_ID: +      type: runtime +      data_type: int +      description: "DefectDojo product id." +    DOJO_PROXY: +      type: config +      data_type: url +      description: "Optional proxy for connecting to DefectDojo." +  commands: +    pre: +    exec: "python /usr/bin/appsecpipeline/tools/defectdojo/dojo_ci_cd.py --dir=$DOJO_DIR --api_key=$DOJO_API_KEY --host=$DOJO_HOST --product=$DOJO_PRODUCT_ID" +    shell: False +    post: +    report: +    reportname: +    junit: +  profiles: +    all: "--build_id=$BUILD_ID --closeengagement" +    close_engagement: "--engagement=$DOJO_ENGAGEMENT_ID --closeengagement" +    engagement: "--engagement=$DOJO_ENGAGEMENT_ID" +    all_proxy: "--proxy=$DOJO_PROXY --build_id=$BUILD_ID" +dependency-check: +  version: AppSecPipeline 0.5.0 +  tags: +    - "Components with known Vulnerabilities" +  type: "static" +  description: "Dependency-Check is a utility that identifies project dependencies and checks if there are any known, publicly disclosed, vulnerabilities. Currently Java and .NET are supported; additional experimental support has been added for Ruby, Node.js, Python, and limited support for C/C++ build systems (autoconf and cmake)." +  docker: "appsecpipeline/sast" +  url: https://www.owasp.org/index.php/OWASP_Dependency_Check +  documentation: https://jeremylong.github.io/DependencyCheck/ +  parameters: +    LOC: +      type: runtime +      data_type: string +      description: "Location of the source code." +    PROJECT: +      type: runtime +      data_type: string +      description: "Name of the Dependency project." +  commands: +    pre: +    exec: "/usr/bin/dependency-check/bin/dependency-check.sh" +    shell: False +    post: +    report: "--out {reportname} --format XML" +    reportname: "{timestamp}.xml" +    junit: +  languages: +    - "java" +    - "nodejs" +    - "ruby" +    - ".net" +    - "python" +  profiles: +    #Runs the full dependency scan, only updates every week +    all: "--project $PROJECT --scan $LOC --cveValidForHours 168" +git: +  version: AppSecPipeline 0.5.0 +  tags: +    - "Utility" +  type: "utility" +  description: "Git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency."
+ docker: "appsecpipeline/base" + url: https://git-scm.com/ + documentation: https://git-scm.com/docs/git + parameters: + GIT_URL: + type: runtime + data_type: url + description: "URL of the source code repository." + LOC: + type: runtime + data_type: string + description: "Location of the source code." + GIT_TAGS: + type: runtime + data_type: string + description: "Checkout a specified tag." + commands: + pre: + exec: "sh git.sh" + shell: False + post: + report: + reportname: + junit: + profiles: + clone: "clone $GIT_URL $LOC" + tags: "$GIT_URL $LOC $GIT_TAGS" +nikto: + version: AppSecPipeline 0.5.0 + tags: + - "Dyanmic Scanner" + type: "dynamic" + scan_type: "web" + icon-sm: + icon-lg: + description: "Web server scanner which performs comprehensive tests against web servers for multiple items, including over 3500 potentially dangerous files/CGIs, versions on over 900 servers, and version specific problems on over 250 servers." + url: https://cirt.net/Nikto2 + documentation: https://cirt.net/nikto2-docs/ + docker: "appsecpipeline/base-tools" + parameters: + URL: + type: runtime + data_type: url + description: "URL of the site to scan." + commands: + pre: + exec: "nikto -h $URL" + report: "-output '{reportname}'" + reportname: "{timestamp}.xml" + post: "python /usr/bin/appsecpipeline/tools/nikto/parser.py -f '{reportname}'" + junit: "junit.py -f '{reportname}' -t nikto" + profiles: + all: "" + tuned: "-Tuning x 6" + fast: "-Plugins \"headers;report_xml\"" + file_upload: "-Tuning 0" + misconfig: "-Tuning 2" + info: "-Tuning 3" + xss: "-Tuning 4" + remote: "-Tuning 57" + dos: "-Tuning 6" + command_exec: "-Tuning 8" + sqli: "-Tuning 9" + identification: "-Tuning b" +nmap: + version: AppSecPipeline 0.5.0 + tags: + - "Dyanmic Scanner" + type: "dynamic" + scan_type: "infrastructure" + icon-sm: + icon-lg: + description: "Nmap is a free and open source (license) utility for network discovery and security auditing. Many systems and network administrators also find it useful for tasks such as network inventory, managing service upgrade schedules, and monitoring host or service uptime. Nmap uses raw IP packets in novel ways to determine what hosts are available on the network, what services (application name and version) those hosts are offering, what operating systems (and OS versions) they are running, what type of packet filters/firewalls are in use, and dozens of other characteristics. It was designed to rapidly scan large networks, but works fine against single hosts." + url: https://nmap.org/ + documentation: https://nmap.org/book/man.html + docker: "appsecpipeline/base-tools" + parameters: + TARGET: + type: runtime + data_type: host + description: "Target hostname of the site to scan." + commands: + pre: + exec: "nmap" + shell: False + report: "-oX {reportname}" + reportname: "{timestamp}.xml" + post: + junit: + credentials: + simple: + profiles: + #Full handshake, fairly fast + intensive_evident: "-sT -p- -A -T4 $TARGET" + #Scans all TCP ports + all: "-p 1-65535 -T4 -A -v $TARGET" + #Default everything. Will issue a TCP SYN scan for the most common 1000 TCP ports, using ICMP Echo request (ping) for host detection. + regular: "$TARGET" + #scan the most common TCP ports. It will make an effort in determining the OS type and what services and their versions are running. + intense: "-T4 -A -v $TARGET" + #Same as the intense scan and will also scan UDP ports (-sU) + intense_udp: "-sS -sU -T4 -A -v $TARGET" + #Do only a ping only on the target, no port scan. 
+ ping: "-sn $TARGET" + #Limiting the number of TCP ports scanned to only the top 100 most common TCP ports + quick: "-T4 -F $TARGET" + #Version and OS detection + quick_light: "-sV -T4 -O -F –version-light $TARGET" + #determine hosts and routers in a network scan. It will traceroute and ping all hosts defined in the target. + traceroute: "-sn –traceroute $TARGET" + #Intense scan plus UDP, highly intrusive and very slow + comprehensive: "-sS -sU -T4 -A -v -PE -PP -PS80,443 -PA3389 -PU40125 -PY -g 53 -script \"default or (discovery and safe)\" $TARGET" + #SYN scan + syn: "–sS $TARGET" + #UDP scan + udp: "–sU $TARGET" + #SCTP INIT Scan, Like SYN scan, INIT scan is relatively unobtrusive and stealthy, since it never completes SCTP associations. + sctp: "–sY $TARGET" + #TCP Window Scan, exactly the same as ACK scan, except that it exploits an implementation detail of certain systems to differentiate open ports from closed ones, rather than always printing unfiltered when an RST is returned. + windows: "–sW $TARGET" + #Stealth scan + stealth: "-sS -p- -T2 $TARGET" +prepenv: + version: AppSecPipeline 0.5.0 + tags: + - "Utility" + type: "utility" + description: "AppSecPipeline Utility" + docker: "appsecpipeline/base" + url: https://github.com/OWASP/django-DefectDojo + documentation: http://defectdojo.readthedocs.io/en/latest/ + parameters: + DOJO_ENGAGEMENT_ID: + type: runtime + data_type: int + description: "Engagement id that exists in DefectDojo." + DOJO_DIR: + type: runtime + data_type: string + description: "Directory where the report reside in for importing into DefectDojo." + BUILD_ID: + type: runtime + data_type: string + description: "Build ID from upstream CI/CD." + DOJO_API_KEY: + type: config + data_type: key + description: "Dojo API key." + DOJO_HOST: + type: config + data_type: string + description: "DefectDojo host." + DOJO_PRODUCT_ID: + type: runtime + data_type: int + description: "DefectDojo product id." + DOJO_PROXY: + type: config + data_type: url + description: "Optional proxy for connecting to DefectDojo." + commands: + pre: + exec: "python /usr/bin/appsecpipeline/tools/prepenv/prep_run.py --dir=$DOJO_DIR --api_key=$DOJO_API_KEY --host=$DOJO_HOST --product=$DOJO_PRODUCT_ID" + shell: False + post: + report: + reportname: + junit: + profiles: + all: "--build_id=$BUILD_ID" + close_engagement: "--engagement=$DOJO_ENGAGEMENT_ID --closeengagement" + engagement: "--engagement=$DOJO_ENGAGEMENT_ID" + all_proxy: "--proxy=$DOJO_PROXY --build_id=$BUILD_ID" +retirejs: + version: AppSecPipeline 0.5.0 + tags: + - "Components with known Vulnerabilities" + type: "static" + description: "There is a plethora of JavaScript libraries for use on the Web and in Node.JS apps out there. This greatly simplifies development,but we need to stay up-to-date on security fixes. Using 'Components with Known Vulnerabilities' is now a part of the OWASP Top 10 list of security risks and insecure libraries can pose a huge risk to your Web app. The goal of Retire.js is to help you detect the use of JS-library versions with known vulnerabilities." + docker: "appsecpipeline/node" + url: https://retirejs.github.io/retire.js/ + documentation: https://github.com/RetireJS/retire.js + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." 
+  commands: +    pre: +    exec: "retire" +    shell: False +    post: +    report: "--outputpath {reportname} --outputformat json" +    reportname: "{timestamp}.json" +    junit: +  languages: +    - "javascript" +    - "nodejs" +  profiles: +    #Runs the full dependency scan +    all: "--path $LOC" +snyk: +  version: AppSecPipeline 0.5.0 +  tags: +    - "Static Code Analyzer" +  type: "static" +  description: "Snyk continuously monitors your application's dependencies and lets you quickly respond when new vulnerabilities are disclosed." +  docker: "appsecpipeline/node" +  url: https://snyk.io/ +  documentation: https://snyk.io/docs/ +  parameters: +    LOC: +      type: runtime +      data_type: string +      description: "Location of the source code." +    SNYK_API_TOKEN: +      type: config +      data_type: api +      description: "Snyk API token" +  commands: +    pre: "snyk auth $SNYK_API_TOKEN" +    exec: "snyk" +    shell: True +    post: +    report: "--json > {reportname}" +    reportname: "{timestamp}.json" +    junit: +  languages: +    - "ruby" +    - "nodejs" +    - "java" +    - "python" +  profiles: +    all: "test ." +spotbugs: +  version: AppSecPipeline 0.5.0 +  tags: +    - "Static Code Analyzer" +  type: "static" +  description: "SpotBugs is a program which uses static analysis to look for bugs in Java code." +  docker: "appsecpipeline/sast" +  url: https://spotbugs.github.io/ +  documentation: http://spotbugs-in-kengo-toda.readthedocs.io/en/latest/index.html +  parameters: +    LOC: +      type: runtime +      data_type: string +      description: "Location of the source code." +  commands: +    parameters: "COMPILE_LOC : Location of jar file. COMPILE_LOC=/temp/jar" +    pre: +    exec: "spotbugs -textui" +    shell: False +    post: +    report: "-xml -output {reportname} $COMPILE_LOC" +    reportname: "{timestamp}.xml" +    junit: +  languages: +    - "java" +  profiles: +    #Runs the full SpotBugs scan +    all: "-effort:max" +ssllabs: +  version: AppSecPipeline 0.5.0 +  tags: +    - "Dynamic Scanner" +  type: "dynamic" +  scan_type: "server" +  icon-sm: +  icon-lg: +  description: "This tool is a command-line client for the SSL Labs APIs, designed for automated and/or bulk testing." +  url: https://github.com/ssllabs/ssllabs-scan +  documentation: https://sourceforge.net/p/ssllabs/mailman/ssllabs-devel/ +  docker: "appsecpipeline/base-tools" +  parameters: +    URL: +      type: runtime +      data_type: url +      description: "URL of the site to scan." +  commands: +    pre: +    exec: "ssllabs-scan" +    shell: True +    report: "> {reportname}" +    reportname: "{timestamp}.json" +    post: "python /usr/bin/appsecpipeline/tools/ssllabs/parser.py -f '{reportname}'" +    junit: +  credentials: +    simple: +  profiles: +    all: "-usecache -verbosity=DEBUG $URL" +    quick: " " +    grade: "-usecache -grade $URL" +tenableio: +  version: AppSecPipeline 0.5.0 +  tags: +    - "Infrastructure Scanner" +  type: "infrastructure" +  icon-sm: +  icon-lg: +  description: "Cloud-based Cyber Exposure platform for modern assets - from IT to cloud to IoT and OT." +  url: https://www.tenable.com/products/tenable-io +  documentation: https://docs.tenable.com/TenableIO.htm +  docker: appsecpipeline/base-tools +  parameters: +    TARGET: +      type: runtime +      data_type: host +      description: "Target hostname or ip address." +    TENABLE_TEMPLATE: +      type: runtime +      data_type: string +      description: "Tenable profile to run. (Defined in TenableIO)" +    TENABLE_SCAN_NAME: +      type: runtime +      data_type: string +      description: "Name of TenableIO scan." +    TENABLE_ACCESS_KEY: +      type: config +      data_type: key +      description: "Access key, generated in the TenableIO GUI."
+    TENABLE_SECRET_KEY: +      type: config +      data_type: key +      description: "Secret key, generated in the TenableIO GUI." +  commands: +    pre: +    exec: "python /usr/bin/appsecpipeline/tools/tenableio/tenablescan.py --target $TARGET --access_key $TENABLE_ACCESS_KEY --secret_key $TENABLE_SECRET_KEY" +    shell: False +    report: "--report {reportname}" +    reportname: "{timestamp}.xml" +    post: +    junit: +  profiles: +    all: " " +wpscan: +  version: AppSecPipeline 0.5.0 +  tags: +    - "Dynamic Scanner" +  type: "dynamic" +  scan_type: "web" +  icon-sm: +  icon-lg: +  description: "WPScan is a black box WordPress vulnerability scanner." +  url: https://wpscan.org +  documentation: https://github.com/wpscanteam/wpscan +  docker: "appsecpipeline/base-tools" +  parameters: +    URL: +      type: runtime +      data_type: url +      description: "URL of the site to scan." +  commands: +    pre: +    exec: "wpscan --url $URL" +    shell: True +    report: "--format json --output {reportname}" +    reportname: "{timestamp}.json" +    post: "python /usr/bin/appsecpipeline/tools/wpscan/parser.py -f {reportname}" +    junit: +  profiles: +    non_intrusive: "--detection-mode passive --random-user-agent" +    plugins: "--enumerate p --random-user-agent" +    all_enumeration: "--enumerate p --detection-mode mixed --random-user-agent" +zap: +  version: AppSecPipeline 0.5.0 +  tags: +    - "Dynamic Scanner" +  type: "dynamic" +  scan_type: "web" +  icon-sm: +  icon-lg: +  description: "The OWASP Zed Attack Proxy (ZAP) is one of the world’s most popular free security tools and is actively maintained by hundreds of international volunteers. It can help you automatically find security vulnerabilities in your web applications while you are developing and testing your applications. It's also a great tool for experienced pentesters to use for manual security testing." +  url: https://github.com/zaproxy/zaproxy +  documentation: https://github.com/zaproxy/zaproxy/wiki +  docker: "appsecpipeline/zap" +  parameters: +    URL: +      type: runtime +      data_type: url +      description: "URL of host to scan." +    LOGIN_URL: +      type: runtime +      data_type: url +      description: "Login URL of host to scan." +    LOGIN_PARMS: +      type: runtime +      data_type: string +      description: "Login parameters in the format of username=user&password=password." +    LOGIN_SUCCESS: +      type: runtime +      data_type: string +      description: "Successful login text to match on." +    LOGIN_LOGOUT_PATTERN: +      type: runtime +      data_type: string +      description: "Logout text to avoid." +  commands: +    pre: +    exec: "python /zap/zap-baseline.py -t $URL" +    shell: False +    report: "-x {reportname}" +    reportname: "{timestamp}.xml" +    post: +    junit: +  credentials: +    simple: +  profiles: +    all: "" +    quick: " " diff --git a/controller/tool-config-template.config b/controller/tool-config-template.config new file mode 100644 index 0000000..f7325a5 --- /dev/null +++ b/controller/tool-config-template.config @@ -0,0 +1,97 @@ +"appspider": +  "parameters": +    "APPSPIDER_ADMIN_PASSWORD": +      "data_type": "password" +      "description": "AppSpider Admin API Password" +      "type": "config" +      "value": "{replace-me}" +    "APPSPIDER_ADMIN_USERNAME": +      "data_type": "username" +      "description": "AppSpider Admin API Username" +      "type": "config" +      "value": "{replace-me}" +    "APPSPIDER_PASSWORD": +      "data_type": "password" +      "description": "AppSpider API Password" +      "type": "config" +      "value": "{replace-me}" +    "APPSPIDER_URL": +      "data_type": "url" +      "description": "AppSpider Enterprise URL."
+ "type": "config" + "value": "{replace-me}" + "APPSPIDER_USERNAME": + "data_type": "username" + "description": "AppSpider API Username" + "type": "config" + "value": "{replace-me}" +"checkmarx": + "parameters": + "CHECKMARX_PASSWORD": + "data_type": "password" + "description": "Checkmarx password." + "type": "config" + "value": "{replace-me}" + "CHECKMARX_URL": + "data_type": "url" + "description": "Checkmarx web URL." + "type": "config" + "value": "{replace-me}" + "CHECKMARX_USERNAME": + "data_type": "username" + "description": "Checkmarx username." + "type": "config" + "value": "{replace-me}" +"defectdojo": + "parameters": + "DOJO_API_KEY": + "data_type": "key" + "description": "Dojo API key." + "type": "config" + "value": "{replace-me}" + "DOJO_HOST": + "data_type": "string" + "description": "DefectDojo host." + "type": "config" + "value": "{replace-me}" + "DOJO_PROXY": + "data_type": "url" + "description": "Optional proxy for connecting to DefectDojo." + "type": "config" + "value": "{replace-me}" +"prepenv": + "parameters": + "DOJO_API_KEY": + "data_type": "key" + "description": "Dojo API key." + "type": "config" + "value": "{replace-me}" + "DOJO_HOST": + "data_type": "string" + "description": "DefectDojo host." + "type": "config" + "value": "{replace-me}" + "DOJO_PROXY": + "data_type": "url" + "description": "Optional proxy for connecting to DefectDojo." + "type": "config" + "value": "{replace-me}" +"snyk": + "parameters": + "SNYK_API_TOKEN": + "data_type": "api" + "description": "Synk API token" + "type": "config" + "value": "{replace-me}" +"tenableio": + "parameters": + "TENABLE_ACCESS_KEY": + "data_type": "key" + "description": "Access key, generated in the TenableIO GUI." + "type": "config" + "value": "{replace-me}" + "TENABLE_SECRET_KEY": + "data_type": "key" + "description": "Secret key, generated in the TenableIO GUI." + "type": "config" + "value": "{replace-me}" diff --git a/dockers/appsecpipeline/docker-compose.yml b/dockers/appsecpipeline/docker-compose.yml new file mode 100644 index 0000000..1ac333e --- /dev/null +++ b/dockers/appsecpipeline/docker-compose.yml @@ -0,0 +1,38 @@ +version: "3" +services: + bodgeit: + image: psiinon/bodgeit + ports: + - "9000:8080" + networks: + appsec: + aliases: + - bodgeit.appsec.pipeline + defectdojo: + image: appsecpipeline/django-defectdojo:latest + ports: + - "8000:8000" + networks: + appsec: + aliases: + - defectdojo.appsec.pipeline + jenkins-pipeline: + image: appsecpipeline/jenkins + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - jenkins_home:/var/jenkins_home + ports: + - "8080:8080" + - "50000:50000" + networks: + appsec: + aliases: + - jenkins.appsec.pipeline + +networks: + appsec: + external: + name: appsecpipeline_default + +volumes: + jenkins_home: diff --git a/dockers/appsecpipeline/launchenv.sh b/dockers/appsecpipeline/launchenv.sh new file mode 100644 index 0000000..a71a6c1 --- /dev/null +++ b/dockers/appsecpipeline/launchenv.sh @@ -0,0 +1,6 @@ +if [ ! "$(docker network ls -f name=appsecpipeline_default | grep appsecpipeline_default)" ]; then + docker network create --driver bridge appsecpipeline_default +fi +docker-compose up -d +echo "Adding allowed hosts to DefectDojo, assuming name: appsecpipeline_defectdojo_1. If the command fails double check the DefectDojo container name." 
+docker exec -ti appsecpipeline_defectdojo_1 sed -i "s/ALLOWED_HOSTS = \[\]/ALLOWED_HOSTS = ['defectdojo.appsec.pipeline', 'localhost']/g" /opt/django-DefectDojo/dojo/settings.py diff --git a/dockers/base/dockerfile-base b/dockers/base/dockerfile-base new file mode 100644 index 0000000..b1a0ce4 --- /dev/null +++ b/dockers/base/dockerfile-base @@ -0,0 +1,30 @@ +FROM kalilinux/kali-linux-docker + +ENV DEBIAN_FRONTEND noninteractive + +RUN apt-get update \ + && apt-get upgrade -y \ + && apt-get install -y \ + build-essential \ + ca-certificates \ + git \ + python-pip \ + python2.7 \ + python2.7-dev \ + csvtool \ + openjdk-9-jre-headless \ + nmap + +########## AppSecPipeline Install ########## +COPY tools /usr/bin/appsecpipeline/tools +COPY dockers/base/setupdocker.sh /tmp +ENV PATH="/usr/bin/appsecpipeline/tools:${PATH}" +RUN sh /tmp/setupdocker.sh +RUN rm /tmp/setupdocker.sh + +########## Checkmarx Install ########## +RUN pip install -r /usr/bin/appsecpipeline/tools/checkmarx/requirements.txt + +ENTRYPOINT ["launch.py"] + +HEALTHCHECK --interval=1m --retries=2 --timeout=5s CMD python /usr/bin/appsecpipeline/tools/health.py diff --git a/dockers/base/dockerfile-base-tools b/dockers/base/dockerfile-base-tools new file mode 100644 index 0000000..3a00aeb --- /dev/null +++ b/dockers/base/dockerfile-base-tools @@ -0,0 +1,61 @@ +FROM kalilinux/kali-linux-docker + +ENV DEBIAN_FRONTEND noninteractive + +RUN apt-get update \ + && apt-get upgrade -y \ + && apt-get install -y \ + build-essential \ + ca-certificates \ + git \ + python-pip \ + python2.7 \ + python2.7-dev \ + csvtool \ + openjdk-9-jre-headless \ + nikto \ + cloc \ + unzip \ + nmap + +########## AppSecPipeline Install ########## +COPY tools /usr/bin/appsecpipeline/tools +COPY dockers/base/setupdocker.sh /tmp +ENV PATH="/usr/bin/appsecpipeline/tools:${PATH}" +RUN sh /tmp/setupdocker.sh +RUN rm /tmp/setupdocker.sh + +########## Tenable Install ########## +RUN pip install -U tenable_io + +########## Bandit Install ########## +RUN pip install -U bandit + +########## Arachni Install ########## +#Install Arachni, packaged apt-get install Arachni doesn't work for some reason, hangs on BrowserCluster +ARG VERSION=1.5.1 +ARG WEB_VERSION=0.5.12 + +RUN mkdir /usr/share/arachni && \ + wget -qO- https://github.com/Arachni/arachni/releases/download/v${VERSION}/arachni-${VERSION}-${WEB_VERSION}-linux-x86_64.tar.gz | tar xvz -C /usr/share/arachni --strip-components=1 + +RUN echo '#!/bin/bash\n\ncd /usr/share/arachni/bin/ && ./arachni "$@"' > /usr/bin/arachni +RUN echo '#!/bin/bash\n\ncd /usr/share/arachni/bin/ && ./arachni_reporter "$@"' > /usr/bin/arachni_reporter + +RUN chmod +x /usr/bin/arachni +RUN chmod +x /usr/bin/arachni_reporter + +#For Arachni to run properly the appsecpipeline user needs write permissions on component cache +RUN chown -R appsecpipeline: /usr/share/arachni/system/ + +########## SSLLabs Scanner Install ########## +ARG SSLLAB=1.4.0 + +RUN wget -qO- https://github.com/ssllabs/ssllabs-scan/releases/download/v${SSLLAB}/ssllabs-scan_${SSLLAB}-linux64.tgz | tar xvz -C /usr/bin --strip-components=1 + +########## Change to appsecpipeline user ########## +USER appsecpipeline + +ENTRYPOINT ["launch.py"] + +HEALTHCHECK --interval=1m --retries=2 --timeout=5s CMD python /usr/bin/appsecpipeline/tools/health.py diff --git a/dockers/base/dockerfile-node b/dockers/base/dockerfile-node new file mode 100644 index 0000000..aa7bdac --- /dev/null +++ b/dockers/base/dockerfile-node @@ -0,0 +1,28 @@ +FROM node + +RUN apt-get update \ + && apt-get upgrade 
-y \ +  && apt-get install -y \ +    build-essential \ +    ca-certificates \ +    git \ +    python-pip \ +    python2.7 \ +    python2.7-dev + +########## AppSecPipeline Install ########## +COPY tools /usr/bin/appsecpipeline/tools +COPY dockers/base/setupdocker.sh /tmp +ENV PATH="/usr/bin/appsecpipeline/tools:${PATH}" +RUN sh /tmp/setupdocker.sh +RUN rm /tmp/setupdocker.sh + +########## Retire.js Install ########## +RUN npm install -g retire + +########## Snyk Install ########## +RUN npm install -g snyk + +ENTRYPOINT ["launch.py"] + +HEALTHCHECK --interval=1m --retries=2 --timeout=5s CMD python /usr/bin/appsecpipeline/tools/health.py diff --git a/dockers/base/dockerfile-ruby b/dockers/base/dockerfile-ruby new file mode 100644 index 0000000..68eb008 --- /dev/null +++ b/dockers/base/dockerfile-ruby @@ -0,0 +1,34 @@ +FROM ruby:2.4 + +RUN apt-get update \ +  && apt-get upgrade -y \ +  && apt-get install -y \ +    build-essential \ +    ca-certificates \ +    git \ +    python-pip \ +    python2.7 \ +    python2.7-dev + +########## AppSecPipeline Install ########## +COPY tools /usr/bin/appsecpipeline/tools +COPY dockers/base/setupdocker.sh /tmp +ENV PATH="/usr/bin/appsecpipeline/tools:${PATH}" +RUN sh /tmp/setupdocker.sh +RUN rm /tmp/setupdocker.sh + +########## Brakeman Install ########## +ENV BRAKEMAN_VERSION=4.0 +RUN gem install brakeman --version ${BRAKEMAN_VERSION} --no-format-exec + +########## WPScan Install ########## +#RUN cd /tmp && git clone https://github.com/wpscanteam/wpscan-v3 +#RUN cd /tmp/wpscan-v3 && bundle install && rake install && cd ../ +RUN gem install wpscan + +#Update WPScan DB +RUN wpscan --update + +ENTRYPOINT ["launch.py"] + +HEALTHCHECK --interval=1m --retries=2 --timeout=5s CMD python /usr/bin/appsecpipeline/tools/health.py diff --git a/dockers/base/dockerfile-sast b/dockers/base/dockerfile-sast new file mode 100644 index 0000000..9e8402d --- /dev/null +++ b/dockers/base/dockerfile-sast @@ -0,0 +1,45 @@ +FROM kalilinux/kali-linux-docker + +ENV DEBIAN_FRONTEND noninteractive + +RUN apt-get update \ +  && apt-get upgrade -y \ +  && apt-get install -y \ +    build-essential \ +    ca-certificates \ +    git \ +    python-pip \ +    python2.7 \ +    python2.7-dev \ +    openjdk-9-jre-headless \ +    cloc \ +    unzip + +########## AppSecPipeline Install ########## +COPY tools /usr/bin/appsecpipeline/tools +COPY dockers/base/setupdocker.sh /tmp +ENV PATH="/usr/bin/appsecpipeline/tools:${PATH}" +RUN sh /tmp/setupdocker.sh +RUN rm /tmp/setupdocker.sh + +########## Bandit Install ########## +RUN pip install -U bandit + +########## Dependency Checker Install ########## +RUN wget -O /tmp/dependency-check.zip https://bintray.com/jeremy-long/owasp/download_file?file_path=dependency-check-3.0.2-release.zip && \ +    unzip /tmp/dependency-check.zip -d /usr/bin/ && \ +    rm /tmp/dependency-check.zip + +#Update the NVD local database for dependency checker +RUN /usr/bin/dependency-check/bin/dependency-check.sh --updateonly + +RUN chown -R appsecpipeline: /usr/bin/dependency-check + +#Dependency check needs write permission on the data directory +RUN chmod -R u=rwx /usr/bin/dependency-check/data + +USER appsecpipeline + +ENTRYPOINT ["launch.py"] + +HEALTHCHECK --interval=1m --retries=2 --timeout=5s CMD python /usr/bin/appsecpipeline/tools/health.py diff --git a/dockers/base/dockerfile-zap b/dockers/base/dockerfile-zap new file mode 100644 index 0000000..dff8a18 --- /dev/null +++ b/dockers/base/dockerfile-zap @@ -0,0 +1,38 @@ +FROM owasp/zap2docker-stable + +USER root + +RUN apt-get update \ +  && apt-get upgrade -y \ +  && apt-get
install -y \ +    build-essential \ +    ca-certificates \ +    python-pip \ +    python2.7 \ +    python2.7-dev + +########## AppSecPipeline Install ########## +COPY tools /usr/bin/appsecpipeline/tools +ENV PATH="/usr/bin/appsecpipeline/tools:${PATH}" + +#Python dependency installs +RUN pip install -U requests +RUN pip install -U junit_xml_output +RUN pip install -U defectdojo_api +RUN pip install -U cryptography + +RUN chmod +x /usr/bin/appsecpipeline/tools/launch.py +RUN chmod +x /usr/bin/appsecpipeline/tools/junit.py + +########## Zap Baseline Install ########## +#Override the baseline zap python script +COPY tools/zap/zap-baseline.py /zap/ + +RUN usermod -u 1000 zap +RUN groupmod -g 1000 zap + +USER zap + +ENTRYPOINT ["launch.py"] + +HEALTHCHECK --interval=1m --retries=2 --timeout=5s CMD python /usr/bin/appsecpipeline/tools/health.py diff --git a/dockers/base/setupdocker.sh b/dockers/base/setupdocker.sh new file mode 100644 index 0000000..3251672 --- /dev/null +++ b/dockers/base/setupdocker.sh @@ -0,0 +1,11 @@ +#Python dependency installs +pip install -U pyyaml +pip install -U requests +pip install -U junit_xml_output +pip install -U defectdojo_api +pip install -U cryptography + +chmod +x /usr/bin/appsecpipeline/tools/launch.py +chmod +x /usr/bin/appsecpipeline/tools/junit.py + +useradd -m -d /home/appsecpipeline appsecpipeline -u 1000 diff --git a/setup.bash b/setup.bash new file mode 100644 index 0000000..582570f --- /dev/null +++ b/setup.bash @@ -0,0 +1,80 @@ +#!/usr/bin/env bash +echo "==================================================================================" +echo "Welcome to the AppSecPipeline! This is a quick script to get you up and running." +echo +echo "Requirements:" +echo "  You'll need the URL to your Jenkins server, username and password" +echo "==================================================================================" +echo + +#JENKINS path +JENKINS_PATH="pipelines/jenkins" + +#Create the config/env files for environment specific configuration +if [[ ! -e $JENKINS_PATH/config/env ]]; then +    echo "creating directory" +    mkdir pipelines/jenkins/config/env +fi + +#Copy the jenkins configuration file +cp $JENKINS_PATH/config/template/jenkins_job.ini.template pipelines/jenkins/config/env/jenkins_job.ini + +unset HISTFILE + +#read -p "Setting up Jenkins?
Not necessary for a local install (y/n): " JENKINS +#if [ $JENKINS == 'y' ] +#then +  read -p "Jenkins Server: (http://jenkins-server:8080): " JENKINSSERVER +  echo $JENKINSSERVER +  read -p "Jenkins Username: " JENKINSUSER +  stty -echo +  read -p "Jenkins Password: " JENKINSPASS; echo +  stty echo + +  #OSX uses an older version of sed +  if [ "$(uname)" == "Darwin" ]; then +    #Save the settings in the configuration file +    sed -i "" "s~jenkins-server~$JENKINSSERVER~g" $JENKINS_PATH/config/env/jenkins_job.ini +    sed -i "" "s/jenkins-builder/$JENKINSUSER/g" $JENKINS_PATH/config/env/jenkins_job.ini +    sed -i "" "s/jenkins-password/$JENKINSPASS/g" $JENKINS_PATH/config/env/jenkins_job.ini +  else +    #Save the settings in the configuration file +    sed -i "s~jenkins-server~$JENKINSSERVER~g" $JENKINS_PATH/config/env/jenkins_job.ini +    sed -i "s/jenkins-builder/$JENKINSUSER/g" $JENKINS_PATH/config/env/jenkins_job.ini +    sed -i "s/jenkins-password/$JENKINSPASS/g" $JENKINS_PATH/config/env/jenkins_job.ini +  fi +  echo "Jenkins Builder configuration file created in: $JENKINS_PATH/config/env/jenkins_job.ini" +  echo +#fi + +echo "Creating the virtual environment" +#create the virtual environment +virtualenv venv + +echo "Activating the virtual environment" +#activate virtual environment +. venv/bin/activate + +echo +echo "Installing required packages...." + +#install the requirements +pip install -r requirements/requirements.txt + +echo "Installing jenkins job builder" +git clone https://github.com/openstack-infra/jenkins-job-builder.git +cd jenkins-job-builder && pip install -e . +cd ../ + +echo +echo "Creating Jenkins Jobs" +sh build/jenkins.sh + +echo +echo +echo "==============================================================================" +echo "Complete!"
+echo "==============================================================================" +echo diff --git a/tools/.DS_Store b/tools/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..0144a8d6eba6c0b16f5bc8600a947adb57729c4c GIT binary patch literal 18436 zcmZQzU|@7AO)+F(aA06y;9!8z0z3>17TgRB3Oo!92HXsA0Fnn`xB!g73^Ip-fgyo` zfq{jAL4biFl_8lSn<1AWk)a5tXp|lefzc2c4S~@R7!85Z5ExV;zzA^-2U1-R5g8>% zLtr!nMtlf>$_E8#8z0nB0P!0jd=Qg?k%55;6iAHV=0AwV01^`bu^1T`Kw3e3kX8^4 z(#pUHVu8&7Yh_>r^&G%_NEd?<)Qtf1!P*%Z!Ce%PxnS)Kj9{A?7#Jbi85mhWwlOd; zLbNk5f^BDDV1#IAU<BI^Q8P-8hQMeDpoV}20|NsWLkdGGLmIgEUc!*Vkk63Cke{3H z;*wHYlFYzxE%9D7SQ8F)DMYC&$S@2}&d)6Xo5TRl%vjAU3ogpb$<Ip%mjR$b1(4ec z7*ZMXz^=?=NM@*H&}B$w$N;-HJ0~64T$VQsMCd{Co8_LrU>6C1O~+$TZa&<fMC(EI zGq_}8W=LnqWGF#(wG61Rg32Jdt>=+E*llnbxC_u^Q0)Pic&rS044Dks3?&TtN#(@_ zNjdpR3=9k>lL~S&i%Sd)t}`+*v#_$Ub8vEUaPo4*2501#2bUz4lomTB7Da=3nJKAB zP<BXueh!?Sm=u<oS{^ST;+&sXl9-v7T7;xLCOk7QCBNJ$zceqU7;H{JVo63)ViDM) zcmav(Y7-+J1tVjlS{;RILjzL-9R)K3i`rTa4slgOThD~t%Bt#`+PaxwkHbrQ27U-1 zPR?RrU_kOBYH$jlM3XHpbA|)}({xw>+<*rF7q_&h7ne^&W^#UBaAs9%ya0c3Mt-?x za(-SwQEFLcYI%5aeokp_UU9sDU~*zka%oOtNvdN`4oGovL`iBz35XR{oL^Mp3|1H~ zAW&MI8kCxoSdv+m>XKNJS{yGR1Tx9Lpd>RtuQ)8Vs5mn}Pual$;vvq}JfgBnYT5>- zR`xDec=`ARxP-Vw!pky?Gm~;sL(4N$N-`o7i<49HQZn<>qcT%c^GY()GE<8<1sQL1 zhU6vYrd|;c6PJ*bl9onS4KpR8G%2&NG_#~qK$(N%iU1!!ubjLzS4>J`Nvdyt3dpn+ zfvp^zU>Rk3Y3XQ?jB`<HVo7QWRD-&PG*?J*W>u;{B2=M{E*Dowc4l6RKrtsotDzB> zbZAauQfiJs3Ma%KGjlHK@G^*(1-zkx);3(y5y|<vxv6<2#e$4FP+<p0Y3VqS%|406 zCH@7ed8sJ^CqUMViA%W3OG`(B<Q!8{KyKntcF=%?A2<X!1sEAP7$g~#7)%&!7y=j~ z8B!R^8G0C|GE8Gw#ITxSGs8}X!wlyb&NEzMxXf^a;UU9whK~$C8JQSa7+D$F82K4x z808q{85I~c8BH0@7|j_S7+o3N89f+18G{(Z86z2^7^4|e7_%627;_o(7|R*!85<d! z7@HY;7^gDMV4TS~i*YgICdMs{TN$@8?q@v7c#82f;~B=QjJFtXL)^&(ZM-n}LijL} z@h(P!h2~hMIBW@4%FUh2Wk3_`O&(Ax6%-N{5tUQc&^0o*afHVvTKpj=Byj41BoA=1 zfh7xY1frzHE0E;OB@0Wx=;=B%GcP5zf&-Lv1lS>}6{M6n1Ws@Wa6q`KQtBK6TR8-n zz+5gZDIHFML=FLFFjq>S%aB7Lg;RhP%;hrSGUF5|<`iH7bEPb~tT_a{IR)6RNQz5{ z+1hb&2{3^28YftqOI6B+lS6<JJq6-Se=8X_F&tz#%W#R|2E$#3rwp$czB4i~axn5S zN`TXx5~DJsDx)5wKBFO{5u*vCEu$Ty1EV9OGoufqFQY$W0AmniG-C{79Ai9VB4ZX~ zHaG<qFjg{FG1f5FGS)M8Fm^HaGWId{LsQ}$#<`5k7*|757(Cs1LijL}aSdv^L#lgB zBkRDbkP2C(8W4*tY8?cs(2xsgv|62oArCxDP=H!+bMFGrBtYemT;o&q3+x)WJ|wqb z$f3FfT<fwk6fhJqq%uJ2_cBylBSE<nO$CzO$tJ8|yD?P2YhxTLP~8D;@o<1$QNoZ3 zrn4A|QC-3k1D=inxfDqal3OhAz6ZMn!yKgWz^Mk+J>YO*VJK$EW2j_6Z$<fm;{hs% zWWTk$B-nnqJ|ufF<WTJcrw9&)Vuk{Se1;N+B!*IkbaekSON0H7rUuD=pIML=5r#QP z?!c)A)jeSUvojPk6f@*7<S-;MB%!-TY#{>!1DXmXxA?uE0}cxe74UR{Lj|fkz)b*7 zh7xcK7vzc@hE#@3hJ2KiaZ~Ub0|NsVRY>k>Y(EBe4^~xhH{nr*>@G%z3n0VT7|Iz6 z7>XH^84?-tP+Y_SZnL7uBe^BQeg{Z91DZVC6<Fnw?Pp*BhYK@96+<EeS}Eb$&%nR{ zl|i!8do3^6PPh!*HZ&Pjdji0kMv2i77?B|$1uj<-84AGpynrDS+z&1qWPL_sQwtph zBLiblr_sX5L`T8Q(1@%)BPXahLG>z1vBs$V8he1`jP@aOQb+rcgS-!kEqNE0jrJwM zLnEVoNt*X1v7|qw7Fve{q!))(6}+{EM-@uTjS<{(!(tX%Uy~iY8Z8k#Vw}g2Iml`@ zs*Q%Cc@w2pV-Sk~cV$4!^56k6TB9Y6)@Xx%R1`f~kJe`35lAWN(b|mWwHaF4L#oM4 zBlEz$MGO`2S`3E@lo|}OLITYUwAzb}Aqh10!jOVFYbbn%fq?-<9?6ATmXIkkG<mrD zu*zd`9S1`aLlHwFLpDPyc*GWM6o`2Sc*G7(4U#*o4IpzJ80H|2*x^)z>K^cj9VfUR zM_y-MfmEZPg3Xo;)cmGob+w_Tr6H)ZY69x4nps-tD43X9VsutP6PTbWZrF?vB(}hb z3QWufcSGT`+~BY;VyHl^)+d>P)lx$6;IIc#&PKw*6nV;2%Egt-9qXhfO{P!bb6o<A zoS0Kj4#RZn2{Z-9CCz2e$tl19F7>%or8qe`&}W_C)5zfICp`uS1`mc9@MK3bLk~kA zLqB->a~H!ghSLle8LlzhV|dK)f#D}3BO^PbAfq^=1fw)~>QkLjgHel7n^6}$`Dw{$ z#c0Fm%IF530QF`JVGLyqXN+KsVoYXCVN7F8XUt?QVk~AXWh`T?fKG(Af+wIRFivEg z#W<gF0VG|*C!ahZd>F~N5_{4^Dm>cGnS)hh$@ECY2Od?Z1qgUZ8;e<JMJ@|NGD8kS zK0`8UW@7>^6oJbjxzaRvAJ~;ha`0jnLk`tN;35s{G8m*nZBQ)QFgMguFf=s;m1z_W 
zz+v?)YCy+?<64t6+GQE-vJCQt90Rr*3auH5nx(Mx{}~{QcR|Z&K}%?{6pKi?Z4$ON zfTV{fm-oQ50TAn@?3}^tr38hzxRBOGp)VGJEg}K0GXX6b0k7lWm6Vc{fk~lj;Sf;f zfRA^B?U0j~#Ijxrv<^mDUQ!ZynF|L8ScAHTB+_~*kV4e;QXmmSBQ8nEf*A06DUgtv zIhQ1CVGMY^6iCooh)WXQ%;AKXZ|@)}iMH4VyjV(1T!L3pUeb+&18KPw+F~ifjfU9_ zOBmKOY-QNbaGc=`!+C}a3>O)0F+5^;!SIRUGs9PgZw&t#IT%I34G2j_CGhGM14c{e z$`vO@Z}6&>V8#%}SjJ?=RB)4_fU$zHim{rph7s0On8~<;aTVig#x>woQ`;F2G9F?) z&3J+FBDhLrfHxj|Abc3fcniF81;^Ue97OANbo~^LRfMDKrx>ZQehRXx5uTYw7jlmD zg`7jF{%3=frVgO}f36G+xc2`;RE?6OAut*O!!iUISzLl$oIs<IIJ^hyB!Jd$f)4rs zDFF>yg4VM#g7)P>^nsLsw)TPg5};jypzV0z0Ut&N2Jl2JBjlVAkamzdkoJMw3`n*0 V)1&qO=td53l7<kBcZW><4*={N)KUNd literal 0 HcmV?d00001 diff --git a/tools/README.md b/tools/README.md new file mode 100644 index 0000000..efb414e --- /dev/null +++ b/tools/README.md @@ -0,0 +1,265 @@ +# AppSecPipeline Tools + +A curated list of tools that are packaged in the AppSecPipeline docker images. + +### Config YAML + +Tool are launched via a YAML file in the tool folder name. For example arachni consists of: tools/arachni/config.yaml + +**Yaml Tool Specification:** + +View one of the samples in tools/<toolname>/config.yaml for configuring a new tool. + +#### Tool Environment Variables +* All variable names in uppercase +* If the tool requires an API key, preface the variable with TOOLNAME-API_KEY +* Review the common variable names below to eliminate duplication and to make the tool easier to configure and run. + +#### Common Tool Environment Variables + +**DAST Specific** + +Name | Description +-------- | --- +URL | URL or IP Address of host +LOGIN_URL | Login URL for host +LOGIN_PARMS | Typically username=test&password=password +LOGIN_SUCCESS | Regular expression for text the scanner should look for on succsful login. +LOGIN_LOGOUT_PATTERN | Specific text for the scanner to look for to avoid logging out. + +**SAST Specific** + +Name | Description +-------- | --- +LOC | Location of the source to scan. + +### Static Tools + +Static Tools are tagged based on the languages the tools supports. The pipeline runs the tool cloc to discover the languages the repository uses. Based off the results of cloc the appropriate SAST tool is launched. The naming convention for the languages must match what cloc has configured. 
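+The selection logic can be sketched roughly as follows. This is an illustrative
+sketch only, not the pipeline's actual implementation: the helper names, the
+source path, and reading the combined secpipeline-config.yaml (the per-tool
+config.yaml files merged under their tool names by build/combine-yaml.py) are
+all assumptions:
+
+```
+import json
+import subprocess
+
+import yaml  # PyYAML
+
+def detect_languages(source_dir):
+    #cloc's JSON output uses language names as its top-level keys, plus
+    #bookkeeping entries ("header", "SUM") that are dropped here.
+    output = subprocess.check_output(["cloc", "--json", source_dir])
+    return [lang for lang in json.loads(output) if lang not in ("header", "SUM")]
+
+def sast_tools_for(languages, config_file="secpipeline-config.yaml"):
+    #Each static tool's config may list the cloc language names it supports
+    #under a "languages" key (see tools/bandit/config.yaml for an example).
+    with open(config_file) as f:
+        tools = yaml.safe_load(f)
+    found = set(lang.lower() for lang in languages)
+    #Case-insensitive match: cloc reports "Python", the configs list "python".
+    return [name for name, tool in tools.items()
+            if any(l.lower() in found for l in (tool.get("languages") or []))]
+
+print(sast_tools_for(detect_languages("/path/to/source")))
+```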
+ +<pre> +ABAP (abap) +ActionScript (as) +Ada (ada, adb, ads, pad) +ADSO/IDSM (adso) +AMPLE (ample, dofile, startup) +Ant (build.xml, build.xml) +ANTLR Grammar (g, g4) +Apex Trigger (trigger) +Arduino Sketch (ino, pde) +ASP (asa, asp) +ASP.NET (asax, ascx, asmx, aspx, master, sitemap, webinfo) +AspectJ (aj) +Assembly (asm, s, S) +AutoHotkey (ahk) +awk (awk) +Blade (blade.php) +Bourne Again Shell (bash) +Bourne Shell (sh) +BrightScript (brs) +builder (xml.builder) +C (c, ec, pgc) +C Shell (csh, tcsh) +C# (cs) +C++ (C, c++, cc, cpp, CPP, cxx, pcc) +C/C++ Header (H, h, hh, hpp, hxx) +CCS (ccs) +Chapel (chpl) +Clean (dcl, icl) +Clojure (clj) +ClojureC (cljc) +ClojureScript (cljs) +CMake (cmake, CMakeLists.txt) +COBOL (CBL, cbl, COB, cob) +CoffeeScript (coffee) +ColdFusion (cfm) +ColdFusion CFScript (cfc) +Coq (v) +Crystal (cr) +CSON (cson) +CSS (css) +Cucumber (feature) +CUDA (cu, cuh) +Cython (pyx) +D (d) +DAL (da) +Dart (dart) +diff (diff) +DITA (dita) +DOORS Extension Language (dxl) +DOS Batch (bat, BAT, BTM, btm, CMD, cmd) +Drools (drl) +DTD (dtd) +dtrace (d) +ECPP (ecpp) +EEx (eex) +Elixir (ex, exs) +Elm (elm) +ERB (erb, ERB) +Erlang (erl, hrl) +Expect (exp) +F# (fsi, fs, fs) +F# Script (fsx) +Focus (focexec) +Forth (4th, e4, f83, fb, forth, fpm, fr, frt, ft, fth, rx, fs, f, for) +Fortran 77 (F, f77, F77, FOR, ftn, FTN, pfo, f, for) +Fortran 90 (F90, f90) +Fortran 95 (f95, F95) +Freemarker Template (ftl) +GDScript (gd) +Glade (glade, ui) +GLSL (comp, frag, geom, glsl, tesc, tese, vert) +Go (go) +Grails (gsp) +GraphQL (gql, graphql) +Groovy (gant, gradle, groovy) +Haml (haml) +Handlebars (handlebars, hbs) +Harbour (hb) +Haskell (hs, lhs) +Haxe (hx) +HLSL (cg, cginc, hlsl, shader) +HTML (htm, html) +IDL (idl, pro) +Idris (idr) +INI (ini) +InstallShield (ism) +Java (java) +JavaScript (es6, js) +JavaServer Faces (jsf) +JCL (jcl) +JSON (json) +JSP (jsp, jspf) +JSX (jsx) +Julia (jl) +Kermit (ksc) +Korn Shell (ksh) +Kotlin (kt, kts) +LESS (less) +lex (l) +LFE (lfe) +liquid (liquid) +Lisp (asd, el, lisp, lsp, cl, jl) +Literate Idris (lidr) +LiveLink OScript (oscript) +Logtalk (lgt, logtalk) +Lua (lua) +m4 (ac, m4) +make (am, Gnumakefile, gnumakefile, Makefile, makefile, mk) +Mako (mako) +Markdown (md) +Mathematica (mt, wl, wlt, m) +MATLAB (m) +Maven (pom, pom.xml) +Modula3 (i3, ig, m3, mg) +MSBuild script (csproj, vbproj, vcproj, wdproj, wixproj) +MUMPS (mps, m) +Mustache (mustache) +MXML (mxml) +NAnt script (build) +NASTRAN DMAP (dmap) +Nemerle (n) +Nim (nim) +Objective C (m) +Objective C++ (mm) +OCaml (ml, mli, mll, mly) +OpenCL (cl) +Oracle Forms (fmt) +Oracle Reports (rex) +Pascal (dpr, p, pas) +Pascal/Puppet (pp) +Patran Command Language (pcl, ses) +Perl (perl, plh, plx, pm, pm6, pl) +PHP (php, php3, php4, php5, phtml) +PHP/Pascal (inc) +Pig Latin (pig) +PL/I (pl1) +PO File (po) +PowerBuilder (sra, srf, srm, srs, sru, srw) +PowerShell (ps1, psd1, psm1) +Prolog (P, pl, pro) +Protocol Buffers (proto) +Pug (pug) +PureScript (purs) +Python (py) +QML (qml) +Qt (ui) +Qt Linguist (ts) +Qt Project (pro) +R (r, R) +Racket (rkt, rktl, scrbl) +RapydScript (pyj) +Razor (cshtml) +Rexx (rexx) +RobotFramework (robot, tsv) +Ruby (rake, rb) +Ruby HTML (rhtml) +Rust (rs) +SAS (sas) +Sass (sass, scss) +Scala (scala) +Scheme (sc, sch, scm, sld, sls, ss) +sed (sed) +SKILL (il) +SKILL++ (ils) +Slice (ice) +Slim (slim) +Smalltalk (st, cs) +Smarty (smarty, tpl) +Softbridge Basic (SBL, sbl) +Solidity (sol) +Specman e (e) +SQL (psql, SQL, sql) +SQL Data (data.sql) +SQL Stored Procedure (spc.sql, spoc.sql, sproc.sql, 
udf.sql)
+Standard ML (fun, sig, sml)
+Stata (do, DO)
+Stylus (styl)
+Swift (swift)
+Tcl/Tk (itk, tcl, tk)
+Teamcenter met (met)
+Teamcenter mth (mth)
+TeX (bst, dtx, sty, tex)
+TITAN Project File Information (tpd)
+Titanium Style Sheet (tss)
+TOML (toml)
+TTCN (ttcn, ttcn2, ttcn3, ttcnpp)
+Twig (twig)
+TypeScript (tsx, ts)
+Unity-Prefab (mat, prefab)
+Vala (vala)
+Vala Header (vapi)
+Velocity Template Language (vm)
+Verilog-SystemVerilog (sv, svh, v)
+VHDL (VHD, vhd, vhdl, VHDL)
+vim script (vim)
+Visual Basic (bas, cls, ctl, dsr, frm, VB, vb, vba, VBA, vbs, VBS)
+Visual Fox Pro (sca, SCA)
+Visualforce Component (component)
+Visualforce Page (page)
+Vuejs Component (vue)
+Windows Message File (mc)
+Windows Module Definition (def)
+Windows Resource File (rc, rc2)
+WiX include (wxi)
+WiX source (wxs)
+WiX string localization (wxl)
+XAML (xaml)
+xBase (prg)
+xBase Header (ch)
+XHTML (xhtml)
+XMI (xmi, XMI)
+XML (XML, xml)
+XQuery (xq, xquery)
+XSD (XSD, xsd)
+XSLT (xsl, XSL, XSLT, xslt)
+yacc (y)
+YAML (yaml, yml)
+zsh (zsh)
+</pre>
+
+Running a tool using the launch script:
+
+```
+docker run --rm -ti -v /your/path/AppSecPipelineReports:/var/appsecpipeline/reports/ appsecpipeline/base /usr/bin/appsecpipeline/tools/launch.py --tool nmap -p regular TARGET=localhost api_key=$DOJO_API_KEY host=$DOJO_HOST product=$DOJO_PRODUCT_ID
+```
diff --git a/tools/appspider/AppSpider.py b/tools/appspider/AppSpider.py
new file mode 100644
index 0000000..c725235
--- /dev/null
+++ b/tools/appspider/AppSpider.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python
+
+"""AppSpider.py: CLI for AppSpider."""
+
+__author__ = "Aaron Weaver"
+__copyright__ = "Copyright 2017, Aaron Weaver"
+
+import argparse
+import os
+import PyAppSpider
+import zipfile
+import time
+import sys
+import uuid
+
+authOK = False
+
+#TODO: add a URL parameter once clients support multiple URLs
+def current_scan_in_progress(url=None):
+    scan_in_progress = False
+    scans = appspider.get_scans()
+    print "Checking to see if a scan is running for this client: " + client
+
+    if scans.is_success():
+        for scan in scans.json()["Scans"]:
+            if appspider.get_scan_status_text(scan["Status"]) == "Running":
+                scan_in_progress = True
+                break
+
+    return scan_in_progress
+
+#Initiates a scan based on a profile and then polls until completion
+def scan_poll(appspider, config, output_file):
+
+    #Avoid running multiple scans if a prior scan has not completed
+    if current_scan_in_progress():
+        print "\nScan already running, exiting.\n"
+        quit()
+
+    scan_id = None
+    scan_status = appspider.run_scan(configName=config)
+    scan_status_flag = False
+    scan_has_report_flag = False
+
+    if scan_status.is_success():
+        scan_id = scan_status.json()["Scan"]["Id"]
+        print "Scan queued. ID is: " + scan_id
+
+        #Check to see if scan is complete, poll until finished
+        while scan_status_flag == False:
+            time.sleep(2)
+            sys.stdout.write(".")
+            sys.stdout.flush()
+            scan_status = appspider.is_scan_finished(scan_id).json()
+            scan_status_flag = scan_status["Result"]
+            if scan_status_flag:
+                print "\nCompleted Scan!"
+
+        #Check for report
+        while scan_has_report_flag == False:
+            time.sleep(2)
+            sys.stdout.write(".")
+            sys.stdout.flush()
+            scan_status = appspider.scan_has_report(scan_id).json()
+            scan_has_report_flag = scan_status["Result"]
+            if scan_has_report_flag:
+                print "\nReport exists in AppSpider, downloading report."
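+        #Report is ready: download the zip report bundle, extract
+        #VulnerabilitiesSummary.xml, delete the zip and rename the summary to
+        #the caller-supplied output file (see zip_download and
+        #unzip_extract_delete below).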
+ targetFile = os.path.basename(output_file) + targetDirectory = os.path.dirname(output_file) + zipfilename = "AppSpider_" + str(uuid.uuid4()) + ".zip" + zip_download(appspider, scan_id, os.path.join(targetDirectory,zipfilename)) + unzip_extract_delete(appspider, scan_id, targetDirectory, zipfilename, output_file) + +def zip_download(appspider, scan_id, zipName): + print "Downloading the zip file." + #Retrieve the zip file + vulnerabilities = appspider.get_report_zip(scan_id) + #Save the file + print "Zip filename: " + zipName + appspider.save_file(vulnerabilities.binary(), zipName) + +def unzip_extract_delete(appspider, scan_id, destination, zipName, targetFile): + archive = zipfile.ZipFile(os.path.join(destination, zipName)) + archive.extract('VulnerabilitiesSummary.xml', destination) + print "Removing Zip File: " + os.path.join(destination, zipName) + #Remove the zip file + os.remove(os.path.join(destination, zipName)) + #Rename the findings file to the user specified filename + print "Renaming VulnerabilitiesSummary.xml: " + os.path.join(destination, 'VulnerabilitiesSummary.xml') + os.rename(os.path.join(destination, 'VulnerabilitiesSummary.xml'), targetFile) + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='AppSpider API Client.', prefix_chars='--') + + parser.add_argument('--url', help='AppSpider URL.', default=None) + parser.add_argument('--username', help='AppSpider username.', default=None) + parser.add_argument('--password', help='AppSpider password.', default=None) + parser.add_argument('--admin-username', help='AppSpider admin username. (Used for global admin features)', default=None) + parser.add_argument('--admin-password', help='AppSpider admin password. (Used for global admin features)', default=None) + parser.add_argument('--client', help='Client name.', default=None) + parser.add_argument('--engine-group', help='Engine group for scanning.', default=None) + parser.add_argument('--proxy', help='Proxy for client to use for requests.', default=None) + + #AppSpider specific Functions + parser.add_argument('--scans', help='Retrieve the scans status.', default=False, action='store_true') + parser.add_argument('--configs', help='Retrieves all the scan configurations.', default=False, action='store_true') + parser.add_argument('--vulns', help='Retrieves all the vulnerabilites for the specified client.', default=False, action='store_true') + parser.add_argument('--vulns-summary', help='Gets VulnerabilitiesSummary.xml for the scan. Requires a scan id and output file.', default=False, action='store_true') + parser.add_argument('--scan-id', help='Scan id for the specified client.', default=None) + parser.add_argument('--output-file', help='Name of the output file.', default=None) + parser.add_argument('--report-zip', help='Retrieves the zip report file. Requires a scan id and output file.', default=False, action='store_true') + parser.add_argument('--zip-extract-summary', help='Destination for the VulnerabilitiesSummary.xml and then delete the zip file.', default=None) + parser.add_argument('--crawled-links', help='Retrieves the crawled links. 
Requires a scan id and output file.', default=False, action='store_true') + parser.add_argument('--run-scan', help='Runs the scan with the specified scan name.', default=None) + parser.add_argument('--run-scan-poll', help='Runs the scan with the specified config and polls to completion.', default=None) + parser.add_argument('--create-config', help='Creates a scan configuration', default=None, action='store_true') + parser.add_argument('--create-run', help='Creates a scan configuration', default=None, action='store_true') + parser.add_argument('--create-engine-group', help='Engine group for a scan configuration', default=None) + parser.add_argument('--create-name', help='Config name', default=None) + parser.add_argument('--create-xml', help='XML configuration for scan', default=None) + parser.add_argument('--create-seed-url', help='Starting URL for scan', default=None) + parser.add_argument('--create-constraint-url', help='Include url constraint, example: http://www.yoursite.com/*', default=None) + parser.add_argument('--create-custom-header', help='Custom Header (API Token in header for example)', default=None) + parser.add_argument('--engines', help='Lists the engines configured in AppSpider Enterprise', default=False, action='store_true') + parser.add_argument('--engine-groups', help='Lists the engine groups configured in AppSpider Enterprise', default=False, action='store_true') + + arguments = parser.parse_args() + + #Environment by default override if specified in command line args + url = arguments.url if arguments.url is not None else os.environ.get('APPSPIDER_URL') + username = arguments.username if arguments.username is not None else os.environ.get('APPSPIDER_USERNAME') + password = arguments.password if arguments.password is not None else os.environ.get('APPSPIDER_PASSWORD') + admin_username = arguments.username if arguments.username is not None else os.environ.get('APPSPIDER_ADMIN_USERNAME') + admin_password = arguments.password if arguments.password is not None else os.environ.get('APPSPIDER_ADMIN_PASSWORD') + client = arguments.client if arguments.client is not None else os.environ.get('APPSPIDER_CLIENT') + engine_group = arguments.engine_group if arguments.engine_group is not None else os.environ.get('APPSPIDER_ENGINE_GROUP') + proxy = arguments.proxy if arguments.proxy is not None else os.environ.get('APPSPIDER_PROXY') + + #Validate all parameters have been supplied for login + if url == None or username == None or password == None: + print "Please specify the AppSpider URL, username and password for login.\n" + quit() + + proxies = None + if proxy is not None: + proxies = { + 'http': proxy, + 'https': proxy, + } + + #Authenticate + appspider = PyAppSpider.PyAppSpider(url, debug=False, proxies=proxies, verify_ssl=False) + admin_appspider = PyAppSpider.PyAppSpider(url, debug=False, proxies=proxies, verify_ssl=False) + authenticated = appspider.authenticate(username, password) + + #If admin credentials are specified + if admin_username is not None: + admin_authenticated = admin_appspider.authenticate(admin_username, admin_password) + + if appspider.loginCode == 1: #Single client + authOK = True + elif appspider.loginCode == 2 and client is None: #Multi client + print "The following clients are available to this user:" + + for spiderClient in appspider.clients: + print spiderClient + + print "\nRe-run the utility with the --client parameter use one of the client name specified in the list above. 
Alternatively set the APPSPIDER_CLIENT environment variable.\n" + elif appspider.loginCode == 2 and client is not None: #Multi client specified + #Authenticate and find the client guid + authenticated = appspider.authenticate(username, password) + clientId = None + for spiderClient in appspider.clients: + if client == spiderClient: + clientId = appspider.clients[client] + if clientId is not None: + authenticated = appspider.authenticate(username, password, clientId) + + if appspider.loginCode == 1: + authOK = True + else: + print "Invalid Client Name" + print authenticated.data_json(pretty=True) + else: + print "Authentication problem: " + authenticated.error() + + #Authenticated, let's do something fun + if authOK == True: + #Retrieve the scans and status + if arguments.scans: + scans = appspider.get_scans() + print "Scan status for client: " + client + + if scans.is_success(): + for scan in scans.json()["Scans"]: + print "Status: " + appspider.get_scan_status_text(scan["Status"]) + print "Scan ID: " + scan["Id"] + for target in scan["Targets"]: + print "URL: " + target["Host"] + print "Started: " + scan["StartTime"] + + if scan["CompletionTime"] is not None: + print "Completed: " + scan["CompletionTime"] + else: + print "Not Completed" + print + else: + print "No scans found" + #Retrieve vulnerablities + elif arguments.vulns: + vulnerabilities = appspider.get_vulnerabilities() + print "Retrieving vulnerablities for client: " + client + if vulnerabilities.is_success(): + print "Total Count: " + str(vulnerabilities.json()["TotalCount"]) + for vulnerability in vulnerabilities.json()["Findings"]: + print "Vuln Type: " + vulnerability["VulnType"] + print "Vuln Type: " + vulnerability["VulnUrl"] + print "Vuln Type: " + vulnerability["Description"] + print + else: + print "No vulnerabilities found" + elif arguments.vulns_summary: + if arguments.scan_id is not None and arguments.output_file is not None: + vulnerabilities = appspider.get_vulnerabilities_summary(arguments.scan_id) + print "Retrieving vulnerablities for client: " + client + appspider.save_file(vulnerabilities.binary(), arguments.output_file) + else: + print "Scan id or out file needed." + elif arguments.report_zip: + if arguments.report_zip is not None and arguments.output_file is not None: + zip_download(appspider, arguments.scan_id, zipName=arguments.output_file) + print "Retrieving Zip file for client: " + client + if arguments.zip_extract_summary is not None: + unzip_extract_delete(appspider, arguments.scan_id, '', zipName=arguments.output_file) + else: + print "Scan id or out file needed." + elif arguments.crawled_links: + if arguments.crawled_links is not None and arguments.output_file is not None: + vulnerabilities = appspider.get_crawled_links(arguments.scan_id) + print "Retrieving crawled links file for client: " + client + appspider.save_file(vulnerabilities.binary(), arguments.output_file) + else: + print "Scan id or out file needed." + #Get the current configurations + elif arguments.configs: + print "Retrieving client config:\n" + configs = appspider.get_configs() + print "Configurations for client: " + client + + if configs.is_success(): + for config in configs.json()["Configs"]: + print "Config Name: " + config["Name"] + #Run a scan + elif arguments.run_scan is not None: + print "Attempting to run a scan\n" + scan_status = appspider.run_scan(configName=arguments.run_scan) + if scan_status.is_success(): + print "Scan queued. 
ID is: " + scan_status.json()["Scan"]["Id"] + #Run a scan + elif arguments.run_scan_poll is not None: + print "Scanning target config, polling and downloading report." + scan_poll(appspider, arguments.run_scan_poll, arguments.output_file) + #Create a scan config + elif arguments.create_config is not None: + print "Creating a scan config\n" + if arguments.create_xml is not None: + #Find the guid fromt the scanner group name + groupId = None + groups = admin_appspider.admin_get_all_engine_groups() + + if groups.is_success(): + for groups in groups.json()["EngineGroups"]: + if groups["Name"] == arguments.create_engine_group: + groupId = groups["Id"] + + seed_urls = [] + seed_url = {} + + if arguments.create_seed_url is not None: + seed_url['url'] = arguments.create_seed_url + seed_urls.append(seed_url) + + scope_constraints = [] + scope_constraint = {} + if arguments.create_constraint_url is not None: + scope_constraint['url'] = arguments.create_constraint_url + scope_constraints.append(scope_constraint) + + custom_headers = [] + custom_header = {} + if arguments.create_custom_header is not None: + custom_header['custom_header'] = arguments.create_custom_header + custom_headers.append(custom_header) + + #Save config + if groupId is not None: + save_config = appspider.save_config(arguments.create_xml, arguments.create_name, groupId, clientId, seed_urls=seed_urls, scope_constraints = scope_constraints, custom_headers=custom_headers) + + if save_config.is_success(): + print "Saved succesfully" + if arguments.create_run is not None: + scan_status = appspider.run_scan(configName=arguments.create_name) + if scan_status.is_success(): + print "Scan queued. ID is: " + scan_status.json()["Scan"]["Id"] + else: + print "Config did not save, please review the message below." + print save_config.data_json(pretty=True) + else: + print "Group not found. Please verify the group name:" + print groups.data_json(pretty=True) + else: + print "XML file required to create a config. Re-run and specify the XML file exported from AppSpider. (--create_xml)" + + #List Engines configured + elif arguments.engines: + print "Listing engines configured in AppSpider.\n" + if admin_appspider.loginCode == 1: + print admin_appspider.admin_get_engines().data_json(pretty=True) + else: + print "Not authenticated as an administrator." + #Admin: List Engines Groups configured + elif arguments.engine_groups: + print "Listing engines groups configured in AppSpider.\n" + if admin_appspider.loginCode == 1: + groups = admin_appspider.admin_get_all_engine_groups() + print "Engine Groups configured on AppSpider:" + if groups.is_success(): + for groups in groups.json()["EngineGroups"]: + print "Group Name: " + groups["Name"] + else: + print "No action specified or action not found.\n" diff --git a/tools/appspider/PyAppSpider.py b/tools/appspider/PyAppSpider.py new file mode 100644 index 0000000..e699c4b --- /dev/null +++ b/tools/appspider/PyAppSpider.py @@ -0,0 +1,826 @@ +import json +import requests +import requests.exceptions +import requests.packages.urllib3 +from xml.etree import cElementTree as ET + +#from . import __version__ as version + +class PyAppSpider(object): + """An API wrapper for AppSpider Enterprise. + https://appspider.help.rapid7.com/docs/rest-api-overview + """ + + token = None + success = False + loginCode = 0 + clients = None + + def __init__(self, host, api_version='v1', verify_ssl=True, timeout=60, proxies=None, user_agent=None, cert=None, debug=False): + """Initialize a AppSpider Enterprise API instance. 
+ + :param host: The URL for the AppSpider Enterprise server. (e.g., http://localhost:8000/AppSpider Enterprise/) + :param api_key: The API key generated on the AppSpider Enterprise API key page. + :param user: The user associated with the API key. + :param api_version: API version to call, the default is v1. + :param verify_ssl: Specify if API requests will verify the host's SSL certificate, defaults to true. + :param timeout: HTTP timeout in seconds, default is 30. + :param proxies: Proxy for API requests. + :param user_agent: HTTP user agent string, default is "AppSpider Enterprise_api/[version]". + :param cert: You can also specify a local cert to use as client side certificate, as a single file (containing + the private key and the certificate) or as a tuple of both file's path + :param debug: Prints requests and responses, useful for debugging. + + """ + version = "0.2" + self.host = host + 'AppSpiderEnterprise/rest/' + api_version + '/' + self.api_version = api_version + self.verify_ssl = verify_ssl + self.proxies = proxies + self.timeout = timeout + + if not user_agent: + self.user_agent = 'pyAppSpider_api/v' + version + else: + self.user_agent = user_agent + + self.cert = cert + self.debug = debug # Prints request and response information. + + token = None + if not self.verify_ssl: + requests.packages.urllib3.disable_warnings() # Disabling SSL warning messages if verification is disabled. + + def authenticate(self, name, password, clientId=None): + """Returns the AppSpider authentication token and/or client associated with the login. If the account is multi-client then AppSpider returns the list of clients associated with the account. + + :param name: Userid of the appspider user + :param name: Password of the appspider user + :param name: ClientID in AppSpider + + """ + params = {} + + if clientId: + params['clientId'] = clientId + + params['name'] = name + params['password'] = password + + response = self._request('POST', 'Authentication/Login', data=params) + + if response.success: + self.success = response.data["IsSuccess"] + if self.success: + self.token = response.data["Token"] + self.loginCode = 1 #Authenticated + elif response.data["Reason"] == "Invalid clientId": + self.clients = response.data["Clients"] + self.loginCode = 2 #Authenticated but need to select a client id + else: + #Connection error or bad login + self.success = False + + return response + + ###### Helper Functions ###### + + def get_client_name(self, clientId): + """Retrieves the client name from a client id + + :param clientId: Client ID (guid) + + """ + + config = self.get_config(clientId) + + return config.json()["Config"]["Name"] + + def get_scan_status_text(self, statusId): + """Retrieves the client name from a client id + + :param clientId: Status ID (int) + + """ + statusTxt = "Unknown Code: " + str(statusId) + if statusId == 32: + statusTxt = "Completed" + elif statusId == 72: + statusTxt = "Failed" + elif statusId == 80: + statusTxt = "Paused" + elif statusId == 82: + statusTxt = "Running" + elif statusId == 119: + statusTxt = "Vuln Load Failed" + elif statusId == 122: + statusTxt = "Stopping" + + return statusTxt + + def edit_scan_config_xml(self, xml_file, seed_urls, scope_constraints, custom_headers): + """Adds xml elements for scanning url and includes + + :param xml_file: Scanner config xml file + :param seed_urls: seed_url + :param scope_constraints: scope_constraints + + """ + + tree = ET.parse(xml_file) + + xmlRoot = tree.getroot() + xml_node = xmlRoot.findall("CrawlConfig/SeedUrlList") + 
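+        #The exported AppSpider scan config is plain XML; the loops below graft
+        #new SeedUrl, ScopeConstraint and CustomHeaders elements into the
+        #CrawlConfig and HTTPHeadersConfig sections before save_config posts
+        #the modified XML back to the API.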
+ for elem in xmlRoot.iterfind('CrawlConfig/SeedUrlList'): + for seed_url in seed_urls: + seedUrl = ET.Element("SeedUrl") + elem.append(seedUrl) + value = ET.Element("Value") + value.text = seed_url['url'] + seedUrl.append(value) + + for elem in xmlRoot.iterfind('CrawlConfig/ScopeConstraintList'): + for scope_constraint in scope_constraints: + scope_constraintXML = ET.Element("ScopeConstraint") + elem.append(scope_constraintXML) + #URL + url = ET.Element("URL") + url.text = scope_constraint['url'] + scope_constraintXML.append(url) + #Method + method = ET.Element("Method") + if 'method' in scope_constraint: + method.text = scope_constraint['method'] + else: + method.text = "All" + scope_constraintXML.append(method) + #MatchCriteria + match_criteria = ET.Element("MatchCriteria") + if "match_criteria" in scope_constraint: + match_criteria.text = scope_constraint["match_criteria"] + else: + match_criteria.text = "Wildcard" + + scope_constraintXML.append(match_criteria) + #Exclusion + include = ET.Element("Exclusion") + if "include" in scope_constraint: + include.text = scope_constraint["include"] + else: + include.text = "Include" + + scope_constraintXML.append(include) + http_param = ET.Element("HttpParameterList") + scope_constraintXML.append(http_param) + + #Add a customer header, like an API token + for elem in xmlRoot.iterfind('HTTPHeadersConfig/CustomHeadersList'): + for custom_header in custom_headers: + customHeaders = ET.Element("CustomHeaders") + elem.append(customHeaders) + value = ET.Element("Value") + value.text = custom_header["custom_header"] + customHeaders.append(value) + + return ET.tostring(xmlRoot, method="xml") + + #Saves a file from string + def save_file(self, data, filename): + success = None + #If the API can't find the file it returns a json object + if "IsSuccess" in data: + success = False + else: + file = open(filename,"wb") + file.write(data) + file.close + success = True + + return success + + ###### Scan API ####### + + ###### Scan Management ###### + def get_scans(self): + """Retrieves the list of scans. + + """ + + return self._request('GET', "Scan/GetScans") + + def run_scan(self, configId=None, configName=None): + """Starts a scan. 
At least one parameter should be provided to start a scan + + :param configId: Scan config ID (guid) + :param configName: Scan config name + + """ + params = {} + if configId: + params['configId'] = configId + + if configName: + params['configName'] = configName + + return self._request('POST', "Scan/RunScan/", data=params) + + def cancel_scan(self, scanId): + """Cancels "Starting" or "Waiting for Cloud" scan + + :param scanId: Scan ID (guid) + + """ + + params = {} + params['scanId'] = scanId + + return self._request('POST', "/Scan/CancelScan", data=params) + + def pause_scan(self, scanId): + """Pauses a running scan + + :param scanId: Scan ID (guid) + + """ + + params = {} + params['scanId'] = scanId + + return self._request('POST', "/Scan/PauseScan", data=params) + + def pause_all_scans(self): + """Pauses all running scans + + + """ + + return self._request('POST', "/Scan/PauseAllScans") + + def resume_scan(self, scanId): + """Resumes a scan + + :param scanId: Scan ID (guid) + + """ + + params = {} + params['scanId'] = scanId + + return self._request('POST', "/Scan/ResumeScan", data=params) + + def resume_all_scans(self): + """Resumes all scans + + + """ + + return self._request('POST', "/Scan/ResumeAllScans") + + def stop_scan(self, scanId): + """Stops a running scan + + :param scanId: Scan ID (guid) + + """ + + params = {} + params['scanId'] = scanId + + return self._request('POST', "/Scan/StopScan", data=params) + + def stop_all_scans(self): + """Stops all scans + + + """ + + return self._request('POST', "/Scan/StopAllScans") + + def get_scan_status(self, scanId): + """Retrieves the scan status represented by a string + + :param scanId: Scan ID (guid) + + """ + + params = {} + params['scanId'] = scanId + + return self._request('GET', "Scan/GetScanStatus", params) + + def is_scan_active(self, scanId): + """Checks to see if the specified scan is active + + :param scanId: Scan ID (guid) + + """ + + params = {} + params['scanId'] = scanId + + return self._request('GET', "Scan/IsScanActive", params) + + def is_scan_finished(self, scanId): + """Checks to see if the specified scan is finished + + :param scanId: Scan ID (guid) + + """ + + params = {} + params['scanId'] = scanId + + return self._request('GET', "Scan/IsScanFinished", params) + + def scan_has_report(self, scanId): + """Checks to see if the specified scan has a report + + :param scanId: Scan ID (guid) + + """ + + params = {} + params['scanId'] = scanId + + return self._request('GET', "Scan/HasReport", params) + + ###### Finding API ####### + def get_vulnerabilities(self): + """Retrieves the list of vulnerabilities filtered by the specified parameters. + + """ + + return self._request('GET', "Finding/GetVulnerabilities") + + ###### Scan Engine Operations ####### + def admin_get_engines(self): + """Retrieves the list of scan engines. + + """ + + return self._request('GET', "Engine/GetEngines") + + def admin_save_engine(self, url, virtualName, login, password, id=None, notes=None, doNotUpdate=None): + """Creates or updates scan engine + + :param id: if id not provided new engine will be created. if id provided engine update performed. + :param url: Scan engine URL. 
URL scheme should be {scheme}://{domain}/{path}/default.asmx + :param virtualName: Scan engine name + :param login: Scan engine username + :param notes: Notes + :param doNotUpdate: Do not update engine property + + """ + + params = {} + + params['url'] = url + params['virtualName'] = virtualName + params['login'] = login + params['password'] = password + + if id: + params['id'] = id + + if notes: + params['notes'] = notes + + if doNotUpdate: + params['doNotUpdate'] = doNotUpdate + + return self._request('POST', "Engine/SaveEngine", params) + + def admin_delete_engine(self, ids): + """Scan engine IDs + + :param ids: Scan Engine ID (guid) + + """ + params['ids'] = ids + + return self._request('POST', "Engine/DeleteEngine", params) + + ###### Scan Engine Operations ####### + def admin_get_all_engine_groups(self): + """Retrieves the list of scan engine groups. Note that System Administrator credentials are required to work with scan engines + + """ + + return self._request('GET', "EngineGroup/GetAllEngineGroups") + + def admin_get_engine_groups_for_client(self): + """Retrieves the list of scan engine groups for a client. Note that System Administrator credentials are required to work with scan engines + + """ + + return self._request('GET', "EngineGroup/GetEngineGroupsForClient") + + def admin_save_engine_group(self, name, description=None, monitoring=None, id=None): + """Creates or updates a scan engine group + + :param id: If id not provided a new engine group will be created. If an id is provided then an engine group update is performed. + :param name: Scan engine group name. Name should be unique + :param description: Scan engine group description + :param monitoring: Scan engine group is monitoring + + """ + + params = {} + + params['name'] = name + + if id: + params['id'] = id + + if description: + params['description'] = description + + if monitoring: + params['monitoring'] = monitoring + + return self._request('POST', "EngineGroup/SaveEngineGroup", data=params) + + def admin_delete_engine_group(self, ids): + """Deletes a scan engine group + + :param ids: Scan engine group IDs (guid) + + """ + + params = {} + + params['ids'] = ids + + return self._request('POST', "EngineGroup/DeleteEngineGroup", data=params) + + def admin_add_engine_to_group(self, groupId, engineId): + """Adds a scan engine to a scan engine group + + :param groupId: Scan engine group ID + :param engineId: Scan engine ID + + """ + + params = {} + + params['groupId'] = groupId + params['engineId'] = engineId + + return self._request('POST', "EngineGroup/AddEngineToGroup", data=params) + + def admin_delete_engine_from_group(self, groupId, engineId): + """Deletes scan engine from scan engine group + + :param groupId: Scan engine group ID + :param engineId: Scan engine ID + + """ + + params = {} + + params['groupId'] = groupId + params['engineId'] = engineId + + return self._request('POST', "EngineGroup/DeleteEngineFromGroup", data=params) + + ###### Report Management ####### + def import_standard_report(self, reportData, scanId=None, configId=None): + """Creates a new scan in the scan history or updates the report for the specified scan + + :param scanId: Update scan report if scanId provided and create new scan details if not + :param reportData: Report file + :param configId: Config id uploaded report attached to + + """ + + params = {} + + params['reportData'] = reportData + + if scanId: + params['scanId'] = scanId + + if configId: + params['configId'] = configId + + return self._request('POST', 
"Report/ImportStandardReport", data=params) + + def import_checkmarx_report(self, scanId, file): + """Creates a new scan in the scan history or updates the report for the specified scan + + :param scanId: Scan ID + :param file: Checkmarx report XML file + + """ + + params = {} + + params['scanId'] = scanId + params['file'] = file + + return self._request('POST', "Report/ImportCheckmarxReport", data=params) + + def get_vulnerabilities_summary(self, scanId): + """Gets VulnerabilitiesSummary.xml for the scan. Only scans in "Completed" and "Stopped" states may have a report + + :param scanId: Scan ID + + """ + + params = {} + + params['scanId'] = scanId + + return self._request('GET', "Report/GetVulnerabilitiesSummaryXml", params) + + def get_report_zip(self, scanId): + """Gets ReportAllFiles.zip for the scan. Only scans in "Completed" and "Stopped" states may have reports + + :param scanId: Scan ID + + """ + + params = {} + + params['scanId'] = scanId + + return self._request('GET', "Report/GetReportZip", params) + + def get_crawled_links(self, scanId): + """Gets CrawledLinks.xml for the scan. Only scans in "Completed" and "Stopped" states may have a report + + :param scanId: Scan ID + + """ + + params = {} + + params['scanId'] = scanId + + return self._request('GET', "Report/GetCrawledLinksXml", params) + + ###### Scan Configuration Operations ####### + def save_config(self, xml, name, engineGroupId, clientId, id=None, defendEnabled=False, monitoring=False, + monitoringDelay=0, monitoringTriggerScan=False, isApproveRequired=False, seed_url=False, constraint_url=False, + seed_urls=False, scope_constraints=False, custom_headers=False): + """Creates a new scan configuration + + :param id: If id not provided new config will be created. If id provided config update performed. + :param xml: Scan config xml file. Config name should be unique in the client. + :param defendEnabled: AppSpider Defend enabled + :param monitoring: Monitoring scanning enabled + :param monitoringDelay: Delay between monitoring scans in hours. 
Possible values are 1 (hour), 24 (day), 168 (week), 720 (month) + :param monitoringTriggerScan: Monitoring scan triggers attack scan if changes found + :param name: Config name + :param engineGroupId: Engine group id for scan config + :param isApproveRequired: Approve required property + + """ + + params = {} + + #Required Parameters + params['Name'] = name + params['EngineGroupId'] = engineGroupId + params['ClientId'] = clientId + + #Not required parameters + params['Id'] = id + params['DefendEnabled'] = defendEnabled + params['Monitoring'] = monitoring + params['MonitoringDelay'] = monitoringDelay + params['MonitoringTriggerScan'] = monitoringTriggerScan + params['IsApproveRequired'] = isApproveRequired + + #XML Scan Config Parameters + params['Xml'] = self.edit_scan_config_xml(xml, seed_urls, scope_constraints, custom_headers) + + return self._request('POST', "Config/SaveConfig", files={'Config': (None,json.dumps(params))}) + + def get_configs(self): + """Retrieves all scan configs for the client + + """ + + return self._request('GET', "Config/GetConfigs") + + def get_config(self, id): + """Retrieves scan config for the client + + :param id: Scan config ID + + """ + + params = {} + + params['id'] = id + + return self._request('GET', "Config/GetConfig", params) + + def get_attachment(self, configId, fileName, fileType): + """Retrieves auxiliary files (such as macro, traffic recording, etc), referenced in the scan configuration + + :param configId: Scan config ID + :param fileName: Name of requested file + :param fileType: File type. Values are: "Authentication", "Certificate", "Crawling", "Selenium", "Traffic", "Wsdl + + """ + + params = {} + + params['configId'] = configId + params['fileName'] = fileName + params['fileType'] = fileType + + return self._request('POST', "Config/GetAttachment", data=params) + + ###### Blackout Operations Operations ####### + def get_blackouts(self): + """Retrieves the blackout list for the client + + + """ + + return self._request('GET', "Blackout/GetBlackouts") + + def save_blackout(self, name, startTime, targetHost, id=None, stopTime=None, isRecurring=None, recurrence=None): + """Creates or updates a blackout window + + :param name: Blackout name. Name should be unique in the client + :param startTime: Date and time the blackout starts + :param targetHost: Name of host for the blackout + :param id: Blackout id. Update blackout if id provided and create new blackout if not provided + :param stopTime: Date and time the blackout ends + :param isRecurring: Marks the blackout as a reoccurring event + :param recurrence: Sets the recurrence frequency. See the section "Recurrences Explained" for more detail. 
+ + """ + + params = {} + + params['name'] = name + params['startTime'] = startTime + params['targetHost'] = targetHost + + if id: + params['id'] = id + + if stopTime: + params['stopTime'] = id + + if isRecurring: + params['isRecurring'] = id + + if recurrence: + params['recurrence'] = id + + return self._request('POST', "Blackout/SaveBlackout", data=params) + + def delete_blackouts(self, blackoutIds): + """Removes a blackout window + + :param blackoutIds: Scan config ID + + """ + + params = {} + + params['blackoutIds'] = blackoutIds + + return self._request('POST', "Blackout/DeleteBlackouts", data=params) + + + # Utility + @staticmethod + def _build_list_params(param_name, key, values): + """Builds a list of POST parameters from a list or single value.""" + params = {} + if hasattr(values, '__iter__'): + index = 0 + for value in values: + params[str(param_name) + '[' + str(index) + '].' + str(key)] = str(value) + index += 1 + else: + params[str(param_name) + '[0].' + str(key)] = str(values) + return params + + def _request(self, method, url, params=None, data=None, files=None): + """Common handler for all HTTP requests.""" + if not params: + params = {} + + if data: + data = json.dumps(data) + + headers = { + 'User-Agent': self.user_agent, + 'Authorization': 'Basic ' + str(self.token) + } + + if not files: + headers['Accept'] = 'application/json' + headers['Content-Type'] = 'application/json' + + if self.proxies: + proxies=self.proxies + else: + proxies = {} + + try: + if self.debug: + print(method + ' ' + url) + print(params) + + response = requests.request(method=method, url=self.host + url, params=params, data=data, files=files, headers=headers, + timeout=self.timeout, verify=self.verify_ssl, cert=self.cert, proxies=proxies) + + if self.debug: + print(response.status_code) + print(response.text) + + try: + if response.status_code == 201: #Created new object + data = response.json() + + return AppSpiderResponse(message="Upload complete", data=data, success=True) + elif response.status_code == 204: #Object updates + return AppSpiderResponse(message="Object updated.", success=True) + elif response.status_code == 404: #Object not created + return AppSpiderResponse(message="Object id does not exist.", success=False) + elif 'content-disposition' in response.headers: + data = response.content + return AppSpiderResponse(message="Success", data=data, success=True, response_code=response.status_code) + else: + data = response.json() + return AppSpiderResponse(message="Success", data=data, success=True, response_code=response.status_code) + except ValueError as e: + return AppSpiderResponse(message='JSON response could not be decoded. Detailed error: ' + str(e), success=False) + except requests.exceptions.SSLError as e: + return AppSpiderResponse(message='An SSL error occurred. Detailed error: ' + str(e), success=False) + except requests.exceptions.ConnectionError as e: + return AppSpiderResponse(message=str(e) + 'A connection error occurred. Detailed error: ' + str(e), success=False) + except requests.exceptions.Timeout as e: + return AppSpiderResponse(message='The request timed out after ' + str(self.timeout) + ' seconds.', + success=False) + except requests.exceptions.RequestException as e: + return AppSpiderResponse(message='There was an error while handling the request. Detailed error: ' + str(e), success=False) + + +class AppSpiderResponse(object): + """ + Container for all AppSpider Enterprise API responses, even errors. 
+ + """ + + def __init__(self, message, success, data=None, response_code=-1): + self.message = message + self.data = data + self.success = success + self.response_code = response_code + + def __str__(self): + if self.data: + return str(self.data) + else: + return self.message + + def binary(self): + return self.data + + def json(self): + return self.data + + def id(self): + if self.response_code == 400: #Bad Request + raise ValueError('Object not created:' + json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': '))) + return int(self.data) + + def count(self): + return self.data["TotalCount"] + + def is_success(self): + data = None + + try: + data = self.data["IsSuccess"] + except: + data = self.data + + return data + + def error(self): + errorMessage = self.message + + if self.data is not None: + if "ErrorMessage" in self.data: + self.data["ErrorMessage"] + + return errorMessage + + def data_json(self, pretty=False): + """Returns the data as a valid JSON string.""" + if pretty: + return json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': ')) + else: + return json.dumps(self.data) diff --git a/tools/appspider/config.yaml b/tools/appspider/config.yaml new file mode 100644 index 0000000..119b220 --- /dev/null +++ b/tools/appspider/config.yaml @@ -0,0 +1,60 @@ +appspider: + version: AppSecPipeline 0.5.0 + tool-version: 3.8 + tags: + - "Dynamic Scanner" + type: "dynamic" + scan_type: "web" + icon-sm: + icon-lg: + description: "AppSpider automatically finds vulnerabilities across a wide range of applications from the relatively simple to the most complex—and it includes unique capabilities that enable teams to automate more of the security testing program across the entire software development lifecycle, from creation through production." + url: https://www.rapid7.com/products/appspider/ + documentation: https://appspider.help.rapid7.com/docs/ + docker: "appsecpipeline/base:latest" + parameters: + APPSPIDER_RUN_SCAN_POLL: + type: runtime + data_type: bool + description: "Wait for the scan to complete and specify the AppSpider profile." + APPSPIDER_CLIENT: + type: runtime + data_type: string + description: "AppSpider Client to use for connecting to the API." + APPSPIDER_URL: + type: config + data_type: url + description: "AppSpider Enterprise URL." 
+ APPSPIDER_USERNAME: + type: config + data_type: username + description: "AppSpider API Username" + APPSPIDER_PASSWORD: + type: config + data_type: password + description: "AppSpider API Password" + APPSPIDER_ADMIN_USERNAME: + type: config + data_type: username + description: "AppSpider Admin API Username" + APPSPIDER_ADMIN_PASSWORD: + type: config + data_type: password + description: "AppSpider Admin API Password" + commands: + pre: + exec: "python /usr/bin/appsecpipeline/tools/appspider/AppSpider.py --url $APPSPIDER_URL --username $APPSPIDER_USERNAME --password $APPSPIDER_PASSWORD --admin-username $APPSPIDER_ADMIN_USERNAME --admin-password $APPSPIDER_ADMIN_PASSWORD --client $APPSPIDER_CLIENT" + shell: False + report: "--output-file='{reportname}'" + reportname: "VulnerabilitiesSummary.xml" + post: + junit: + profiles: + all: "--run-scan-poll $APPSPIDER_RUN_SCAN_POLL" + active: "--checks=active/*" + passive: "--checks=passive/*" + fast: "--audit-forms --checks=xss --scope-page-limit=1" + file_upload: "--checks=form_upload" + xss: "--checks=xss" + remote: "--checks=file_inclusion" + command_exec: "--checks=os_cmd_injection" + sqli: "--checks=sql_injection" diff --git a/tools/appspider/parser.py b/tools/appspider/parser.py new file mode 100644 index 0000000..0ad752d --- /dev/null +++ b/tools/appspider/parser.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python +import xml.etree.ElementTree as ET +import csv +from datetime import datetime +import re +import argparse +import os + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + + #Command line options + parser.add_argument("-f", "--file", help="File to process", required=True) + args = parser.parse_args() + + #Parse the XML file + tree = None + try: + #Open up the XML file from the nikto output + tree = ET.parse(args.file) + root = tree.getroot() + scan = root.find('scandetails') + datestring = datetime.strftime(datetime.now(), '%m/%d/%Y') + + #Find only the base filname, save as csv + base = os.path.basename(args.file) + + csv_output = open(os.path.join(os.path.dirname(args.file), "generic_" + os.path.splitext(base)[0] + ".csv"), 'w') + csvwriter = csv.writer(csv_output) + + """ + Date: :: + Date of the finding in mm/dd/yyyy format. + Title: :: + Title of the finding + CweId: :: + Cwe identifier, must be an integer value. + Url: :: + Url associated with the finding. + Severity: :: + Severity of the finding. Must be one of Info, Low, Medium, High, or Critical. + Description: :: + Description of the finding. Can be multiple lines if enclosed in double quotes. + Mitigation: :: + Possible Mitigations for the finding. Can be multiple lines if enclosed in double quotes. + Impact: :: + Detailed impact of the finding. Can be multiple lines if enclosed in double quotes. + References: :: + References associated with the finding. Can be multiple lines if enclosed in double quotes. + Active: :: + Indicator if the finding is active. Must be empty, True or False + Verified: :: + Indicator if the finding has been verified. Must be empty, True, or False + FalsePositive: :: + Indicator if the finding is a false positive. Must be empty, True, or False + Duplicate: :: + Indicator if the finding is a duplicate. 
Must be empty, True, or False + """ + csvwriter.writerow(["Date","Title","CweId","Url","Severity","Description","Mitigation","Impact","References","Active","Verified","FalsePositive","Duplicate"]) + + for item in scan.findall('item'): + finding = [] + + #CSV format + + ####### Individual fields ######## + #Date + finding.append(datestring) + + #Title + titleText = None + description = item.find("description").text + #Cut the title down to the first sentence + sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', description) + if len(sentences) > 0: + titleText = sentences[0][:900] + else: + titleText = description[:900] + finding.append(titleText) + + #CweId + finding.append("0") + + #Url + ip = item.find("iplink").text + #Remove the port numbers for 80/443 + ip = ip.replace(":80","") + ip = ip.replace(":443","") + + finding.append(ip) + + #Severity + finding.append("Low") #Nikto doesn't assign severity, default to low + + #Description + finding.append(item.find("description").text) + + #Mitigation + finding.append("") + + #Impact + finding.append("") + + #References + finding.append("") + + #Active + finding.append("False") + + #Verified + finding.append("False") + + #FalsePositive + finding.append("False") + + #Duplicate + finding.append("False") + + csvwriter.writerow(finding) + + csv_output.close() + except: + print "Nothing in report" diff --git a/tools/arachni/config.yaml b/tools/arachni/config.yaml new file mode 100644 index 0000000..c33912b --- /dev/null +++ b/tools/arachni/config.yaml @@ -0,0 +1,54 @@ +arachni: + version: AppSecPipeline 0.5.0 + tool-version: + tags: + - "Dyanmic Scanner" + type: "dynamic" + scan_type: "web" + icon-sm: + icon-lg: + description: "Arachni is a feature-full, modular, high-performance Ruby framework aimed towards helping penetration testers and administrators evaluate the security of modern web applications." + url: http://www.arachni-scanner.com/ + documentation: https://github.com/Arachni/arachni/wiki/Command-line-user-interface + docker: "appsecpipeline/base-tools:latest" + parameters: + URL: + type: runtime + data_type: url + description: "URL of host to scan." + LOGIN_URL: + type: runtime + data_type: url + description: "Login URL of host to scan." + LOGIN_PARMS: + type: runtime + data_type: string + description: "Login paramaters in the format of username=user&password=password." + LOGIN_SUCCESS: + type: runtime + data_type: string + description: "Succesful login text to match on." + LOGIN_LOGOUT_PATTERN: + type: runtime + data_type: string + description: "Logout text to avoid." 
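+  #URL and the LOGIN_* parameters above are the common DAST variables documented
+  #in tools/README.md; the "credentials" profile below passes them to arachni's
+  #autologin plugin.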
+ commands: + pre: + exec: "arachni $URL" + shell: True + report: "--report-save-path={reportname}" + reportname: "{timestamp}.afr" + post: "arachni_reporter {reportname} --reporter=json:outfile={reportname}.json" + junit: + credentials: + simple: --plugin=autologin:url=$LOGIN_URL,parameters="$LOGIN_PARMS",check="$LOGIN_SUCCESS" --scope-exclude-pattern=$LOGIN_LOGOUT_PATTERN + profiles: + all: "" + active: "--checks=active/*" + passive: "--checks=passive/*" + fast: "--audit-forms --checks=xss --scope-page-limit=1" + file_upload: "--checks=form_upload" + xss: "--checks=xss" + remote: "--checks=file_inclusion" + command_exec: "--checks=os_cmd_injection" + sqli: "--checks=sql_injection" diff --git a/tools/arachni/parser.py b/tools/arachni/parser.py new file mode 100644 index 0000000..0ad752d --- /dev/null +++ b/tools/arachni/parser.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python +import xml.etree.ElementTree as ET +import csv +from datetime import datetime +import re +import argparse +import os + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + + #Command line options + parser.add_argument("-f", "--file", help="File to process", required=True) + args = parser.parse_args() + + #Parse the XML file + tree = None + try: + #Open up the XML file from the nikto output + tree = ET.parse(args.file) + root = tree.getroot() + scan = root.find('scandetails') + datestring = datetime.strftime(datetime.now(), '%m/%d/%Y') + + #Find only the base filname, save as csv + base = os.path.basename(args.file) + + csv_output = open(os.path.join(os.path.dirname(args.file), "generic_" + os.path.splitext(base)[0] + ".csv"), 'w') + csvwriter = csv.writer(csv_output) + + """ + Date: :: + Date of the finding in mm/dd/yyyy format. + Title: :: + Title of the finding + CweId: :: + Cwe identifier, must be an integer value. + Url: :: + Url associated with the finding. + Severity: :: + Severity of the finding. Must be one of Info, Low, Medium, High, or Critical. + Description: :: + Description of the finding. Can be multiple lines if enclosed in double quotes. + Mitigation: :: + Possible Mitigations for the finding. Can be multiple lines if enclosed in double quotes. + Impact: :: + Detailed impact of the finding. Can be multiple lines if enclosed in double quotes. + References: :: + References associated with the finding. Can be multiple lines if enclosed in double quotes. + Active: :: + Indicator if the finding is active. Must be empty, True or False + Verified: :: + Indicator if the finding has been verified. Must be empty, True, or False + FalsePositive: :: + Indicator if the finding is a false positive. Must be empty, True, or False + Duplicate: :: + Indicator if the finding is a duplicate. 
Must be empty, True, or False + """ + csvwriter.writerow(["Date","Title","CweId","Url","Severity","Description","Mitigation","Impact","References","Active","Verified","FalsePositive","Duplicate"]) + + for item in scan.findall('item'): + finding = [] + + #CSV format + + ####### Individual fields ######## + #Date + finding.append(datestring) + + #Title + titleText = None + description = item.find("description").text + #Cut the title down to the first sentence + sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', description) + if len(sentences) > 0: + titleText = sentences[0][:900] + else: + titleText = description[:900] + finding.append(titleText) + + #CweId + finding.append("0") + + #Url + ip = item.find("iplink").text + #Remove the port numbers for 80/443 + ip = ip.replace(":80","") + ip = ip.replace(":443","") + + finding.append(ip) + + #Severity + finding.append("Low") #Nikto doesn't assign severity, default to low + + #Description + finding.append(item.find("description").text) + + #Mitigation + finding.append("") + + #Impact + finding.append("") + + #References + finding.append("") + + #Active + finding.append("False") + + #Verified + finding.append("False") + + #FalsePositive + finding.append("False") + + #Duplicate + finding.append("False") + + csvwriter.writerow(finding) + + csv_output.close() + except: + print "Nothing in report" diff --git a/tools/bandit/config.yaml b/tools/bandit/config.yaml new file mode 100644 index 0000000..2e2891b --- /dev/null +++ b/tools/bandit/config.yaml @@ -0,0 +1,31 @@ +bandit: + version: AppSecPipeline 0.5.0 + tool-version: + name: bandit + tags: + - "Static Code Analyzer" + type: "static" + description: "Bandit is a tool designed to find common security issues in Python code. To do this Bandit processes each file, builds an AST from it, and runs appropriate plugins against the AST nodes. Once Bandit has finished scanning all the files it generates a report." + docker: "appsecpipeline/base-tools:latest" + url: https://wiki.openstack.org/wiki/Security/Projects/Bandit + documentation: https://docs.openstack.org/bandit/latest/index.html + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." + commands: + pre: + exec: "bandit" + shell: True + report: "-f csv -o {reportname}" + reportname: "{timestamp}.csv" + post: "python /usr/bin/appsecpipeline/tools/bandit/parser.py -f {reportname}" + junit: "junit.py -f {reportname} -t bandit" + languages: + - "python" + profiles: + #Runs the full bandit scan + all: "-r $LOC" + #Only the issues that are the highest severity and the highest confidence + tuned: "-lll -iii -r $LOC" diff --git a/tools/bandit/parser.py b/tools/bandit/parser.py new file mode 100644 index 0000000..cc09424 --- /dev/null +++ b/tools/bandit/parser.py @@ -0,0 +1,123 @@ +import csv +from datetime import datetime +import re +import argparse +import os + +""" +Date: :: +Date of the finding in mm/dd/yyyy format. +Title: :: +Title of the finding +CweId: :: +Cwe identifier, must be an integer value. +Url: :: +Url associated with the finding. +Severity: :: +Severity of the finding. Must be one of Info, Low, Medium, High, or Critical. +Description: :: +Description of the finding. Can be multiple lines if enclosed in double quotes. +Mitigation: :: +Possible Mitigations for the finding. Can be multiple lines if enclosed in double quotes. +Impact: :: +Detailed impact of the finding. Can be multiple lines if enclosed in double quotes. +References: :: +References associated with the finding. 
Can be multiple lines if enclosed in double quotes.
+Active: ::
+Indicator if the finding is active. Must be empty, True or False
+Verified: ::
+Indicator if the finding has been verified. Must be empty, True, or False
+FalsePositive: ::
+Indicator if the finding is a false positive. Must be empty, True, or False
+Duplicate: ::
+Indicator if the finding is a duplicate. Must be empty, True, or False
+"""
+def generic_csv(date=None, title=None, cwe=None, url=None, severity=None, description=None, mitigation=None, impact=None, references=None, active="False", verified="False", falsepositive="False", duplicate="False"):
+
+    finding = []
+    datestring = datetime.strftime(datetime.now(), '%m/%d/%Y')
+
+    #Date
+    finding.append(datestring)
+
+    #Title
+    finding.append(title)
+
+    #CweId
+    finding.append(cwe)
+
+    #Url
+    finding.append(url)
+
+    #Severity
+    finding.append(severity)
+
+    #Description
+    finding.append(description)
+
+    #Mitigation
+    finding.append(mitigation)
+
+    #Impact
+    finding.append(impact)
+
+    #References
+    finding.append(references)
+
+    #Active
+    finding.append(active)
+
+    #Verified
+    finding.append(verified)
+
+    #FalsePositive
+    finding.append(falsepositive)
+
+    #Duplicate
+    finding.append(duplicate)
+
+    return finding
+
+def writeFirstRow(csvwriter):
+    csvwriter.writerow(["Date","Title","CweId","Url","Severity","Description","Mitigation","Impact","References","Active","Verified","FalsePositive","Duplicate"])
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+
+    #Command line options
+    parser.add_argument("-f", "--file", help="File to process", required=True)
+    args = parser.parse_args()
+
+    """
+    filename,test_name,test_id,issue_severity,issue_confidence,issue_text,line_number,line_range
+    PyBitBucket.py,blacklist,B405,LOW,HIGH,"Using cElementTree to parse untrusted XML data is known to be vulnerable to XML attacks. Replace cElementTree with the equivalent defusedxml package, or make sure defusedxml.defuse_stdlib() is called.",6,"[6, 7, 8, 9]"
+    """
+
+    #Constants for column names
+    FILENAME = 0
+    TEST_NAME = 1
+    ISSUE_SEVERITY = 3
+    ISSUE_CONFIDENCE = 4
+    ISSUE_TEXT = 5
+    LINE_NUMBER = 6
+    LINE_RANGE = 7
+
+    #Find only the base filename, save as csv
+    base = os.path.basename(args.file)
+    csv_output = open(os.path.join(os.path.dirname(args.file), "generic_" + os.path.splitext(base)[0] + ".csv"), 'w')
+    csvwriter = csv.writer(csv_output)
+
+    with open(args.file, 'rb') as csvfile:
+        reader = csv.reader(csvfile, delimiter=',')
+        writeFirstRow(csvwriter)
+        first = True
+        for row in reader:
+            if first:
+                first = False
+            else:
+                description = row[ISSUE_TEXT]
+                description = description + " Filename: " + row[FILENAME]
+                description = description + " Line number: " + row[LINE_NUMBER]
+                description = description + " Line range: " + row[LINE_RANGE].strip("\n")
+                description = description + " Issue Confidence: " + row[ISSUE_CONFIDENCE]
+                #Pass the severity value from the row (not the column-index constant) and
+                #normalize bandit's LOW/MEDIUM/HIGH to the Low/Medium/High the generic import expects
+                csvwriter.writerow(generic_csv(title=row[TEST_NAME], severity=row[ISSUE_SEVERITY].title(), description=description))
+
+    csv_output.close()
diff --git a/tools/brakeman/config.yaml b/tools/brakeman/config.yaml
new file mode 100644
index 0000000..d2914de
--- /dev/null
+++ b/tools/brakeman/config.yaml
@@ -0,0 +1,29 @@
+brakeman:
+  version: AppSecPipeline 0.5.0
+  tags:
+    - "Static Code Analyzer"
+  type: "static"
+  description: "Brakeman is an open source vulnerability scanner specifically designed for Ruby on Rails applications. It statically analyzes Rails application code to find security issues at any stage of development."
+ docker: "appsecpipeline/ruby:latest" + url: https://brakemanscanner.org/ + documentation: https://brakemanscanner.org/docs/ + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." + commands: + pre: + exec: "brakeman -p $LOC --no-pager" + shell: False + post: + report: "-o {reportname}" + reportname: "{timestamp}.json" + junit: + languages: + - "ruby" + profiles: + #There are some checks which are not run by default. To run all checks, use + all: "-A" + #If Brakeman is running a bit slow, try + tuned: "--faster" diff --git a/tools/checkmarx/.DS_Store b/tools/checkmarx/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmZQzU|@7AO)+F(5MW?n;9!8z45|!R0Z1N%F(jFgL>QrFAPJ2!M?+vV1V%$(Gz3ON zU^D~<VF)ln+{D2Rp-0Kl5Eu=C(GY-#0H}OW0QD6Z7#JL&bOVG2Nii@oFo3%Nj0_Ac zFio(203!nfNGnJUNGpg2X=PvpvA|}4wK6b5wK9UcAq)(R;4TS>25V<v1ltVagS9g- zf^BACV1#IAV1(Mt2<@RTf_gL{^C8+97{Ru~TsKOOhQMeDz(Rl-!Vmz}|E>%SxcdJP zRior+2#kinunYl47MEZbCs3t{!+W4QHvuXKVuPw;Mo^s$(F3lEVT}ML$bg~*R5_@+ b2Uo?6kTwK}57Iu`5P${HC_Nei0}uiLNUI8I literal 0 HcmV?d00001 diff --git a/tools/checkmarx/PyCheckmarx.py b/tools/checkmarx/PyCheckmarx.py new file mode 100644 index 0000000..5363812 --- /dev/null +++ b/tools/checkmarx/PyCheckmarx.py @@ -0,0 +1,512 @@ +############################### +# > Author: Duarte Monteiro +# > Version: 1.0 +# > Vendor: www.checkmarx.com +# > Notes: Python API for Checkmarx WSDL +############################### + +# Python Dependencies +from suds.client import Client +from suds.sudsobject import asdict +from suds.cache import NoCache +import base64 +import re +import json +import time +from zipfile import ZipFile +import os +import uuid +import ssl + +class PyCheckmarx(object): + + # Internal Variables for the Class + DEBUG = False + configPath = "config/" + errorLog = [] + ttlReport = 50 + timeWaitReport = 30 + ssl._create_default_https_context = ssl._create_unverified_context + # + # Init Function + # + def __init__(self, username, password, url): + # Get Configuration + self.getConfig(username, password, url) + # Open Connection With Checkmarx + self.Initclient = self.openConnection() + # Get the Service URL + self.serviceUrl = self.getServiceUrl(self.Initclient) + # Get the Session Id and Client Object + (self.sessionId, self.client) = self.getSessionId(self.Initclient,self.serviceUrl) + return None + + ########################################## + # + # Functions Related to Opening session with Checkmarx + # + ########################################## + + # + # Get Configuration + # + def getConfig(self, username, password, url): + self.USERNAME = username + self.PASSWORD = password + self.URL = str(url + "Cxwebinterface/CxWsResolver.asmx?wsdl") + self.cxURL = str(url) + self.APITYPE = 1 + self.baseProject = None + + # + # Open Connection + # + def openConnection(self): + try: + #proxy_settings = dict(http='http://localhost:8081') + #tmpClient = Client(self.URL, timeout=1200, proxy=proxy_settings) + tmpClient = Client(self.URL, timeout=1200) + if self.DEBUG: + print dir(tmpClient) + return tmpClient + except Exception as e: + raise Exception("Unable to establish connection with WSDL [%s]: %s " % (self.URL, e.message)) + + # + # Get Service URL + # + def getServiceUrl(self, client): + try: + CxClient = client.factory.create('CxClientType') + responseDiscovery = client.service.GetWebServiceUrl(CxClient.Jenkins,self.APITYPE) + + if responseDiscovery.IsSuccesfull: + serviceUrl = 
responseDiscovery.ServiceURL + print "Checkmarx Service URL: " + serviceUrl + else: + raise Exception("Error establishing connection > %s" % cxSDK.ErrorMessage) + + if self.DEBUG: + print "Response Discovery Object:", dir(responseDiscovery) + print "Service Url:", serviceUrl + + return serviceUrl + except Exception as e: + raise Exception("Unable to get Service URL: %s" % e.message) + + # + # Login in Checkmarx and retrive the Session ID + # + def getSessionId(self,client, serviceUrl): + try: + #proxy_settings = dict(http='http://localhost:8081') + #clientSDK = Client(serviceUrl + "?wsdl", cache=NoCache(), timeout=1200, proxy=proxy_settings) + clientSDK = Client(serviceUrl + "?wsdl", timeout=1200, cache=NoCache()) + + CxLogin = clientSDK.factory.create("Credentials") + CxLogin.User = self.USERNAME + CxLogin.Pass = self.PASSWORD + + cxSDK = clientSDK.service.Login(CxLogin,1033) + + if not cxSDK.IsSuccesfull: + raise Exception("Unable to Login > %s" % cxSDK.ErrorMessage) + + if self.DEBUG: + print "Service Object:", dir(client) + print "Login Object:", dir(cxSDK) + print "Session ID:", cxSDK.SessionId + + return (cxSDK.SessionId, clientSDK) + except Exception as e: + raise Exception("Unable to get SessionId from [%s] : %s" % (serviceUrl,e.message)) + + ########################################## + # + # Functions Related to the functionality of the WSDL + # + ########################################## + + # + # Create a scan job + # + def scanProject(self, ProjectName, ServerName, SSHFilePath, PresetID=0, GITBranch="master"): + #Project Settings + ProjectSettings = self.client.factory.create("ProjectSettings") + ProjectSettings.ProjectName = ProjectName + ProjectSettings.PresetID = PresetID + ProjectSettings.projectID = 0 + ProjectSettings.ScanConfigurationID = 1 + ProjectSettings.IsPublic = "false" + del ProjectSettings.OpenSourceAnalysisOrigin + + #Client Scan Arguements + CliScanArgs = self.client.factory.create("CliScanArgs") + CliScanArgs.IsPrivateScan = "false" + CliScanArgs.IsIncremental = "false" + CliScanArgs.IgnoreScanWithUnchangedCode = "true" + del CliScanArgs.ClientOrigin + + #Scan Settings + SourceCodeSettings = self.client.factory.create("SourceCodeSettings") + SourceCodeSettings.SourceOrigin = "SourceControl" + SourceCodeSettings.SourceControlSetting.Port = "0" + SourceCodeSettings.SourceControlSetting.UseSSL = "false" + SourceCodeSettings.SourceControlSetting.UseSSH = "true" + SourceCodeSettings.SourceControlSetting.ServerName = ServerName + SourceCodeSettings.SourceControlSetting.Repository = "GIT" + SourceCodeSettings.SourceControlSetting.Protocol = "SSH" + SourceCodeSettings.SourceControlSetting.GITBranch = GITBranch + SourceCodeSettings.SourceControlSetting.SSHPublicKey = "EmptyStab" + + #Load the ssh key + file = open(SSHFilePath, "r") + SourceCodeSettings.SourceControlSetting.SSHPrivateKey = file.read() + + #Remove "extra" unecessary elements + del SourceCodeSettings.SourceControlSetting.PerforceBrowsingMode + del SourceCodeSettings.SourceControlSetting.GitLsViewType + + #Set the client scanning arguments + CliScanArgs.PrjSettings = ProjectSettings + CliScanArgs.SrcCodeSettings = SourceCodeSettings + + tmp = self.client.service.Scan(self.sessionId, CliScanArgs) + + if not tmp.IsSuccesfull: + raise Exception("Unable to get data from the server.") + + if self.DEBUG: + print dir(tmp) + + return tmp + + def get_directory(self, directory): + + file_paths = [] + + for root, directories, files in os.walk(directory): + for filename in files: + filepath = os.path.join(root, 
filename) + file_paths.append(filepath) + + return file_paths + + def scanExistingProject(self, ProjectId, directory): + config = self.client.service.GetProjectConfiguration(self.sessionId, ProjectId) + + localCodeContainer = self.client.factory.create("LocalCodeContainer") + tempZip = "/tmp/" + str(uuid.uuid4()) + ".zip" + + file_paths = self.get_directory(directory) + with ZipFile(tempZip,'w') as zip: + for file in file_paths: + zip.write(file) + + srcCode = open(tempZip, 'rb') + + srcCodeInput = srcCode.read() + localCodeContainer.ZippedFile = base64.encodestring(srcCodeInput) + localCodeContainer.FileName = str(uuid.uuid4()) + ".zip" + os.remove(tempZip) + + RunScanAndAddToProject = self.client.factory.create("RunScanAndAddToProject") + RunScanAndAddToProject.visibleToUtherUsers = True + RunScanAndAddToProject.isPublicScan = True + + tmp = self.client.service.RunScanAndAddToProject(self.sessionId, config.ProjectConfig.ProjectSettings,localCodeContainer,RunScanAndAddToProject.visibleToUtherUsers, RunScanAndAddToProject.isPublicScan) + + if not tmp.IsSuccesfull: + raise Exception("Unable to get data from the server.") + + if self.DEBUG: + print dir(tmp) + + return tmp.RunId + + def getStatusOfSingleScan(self, RunId): + + ScanId = None + inc = 0 + while inc < self.ttlReport: + inc += 1 + + try: + status = self.client.service.GetStatusOfSingleScan(self.sessionId, RunId) + + if status.CurrentStatus == "Finished": + ScanId = status.ScanId + break + + except Exception as e: + print e + + print "Waiting for Checkmarx to complete." + time.sleep(self.timeWaitReport) + + if self.DEBUG: + print dir(status) + + return ScanId + + # + # Get Suppressed Issues + # + def getXMLReport(self, scanID, fileName): + CxWSReportType = self.client.factory.create("CxWSReportType") + CxReportRequest = self.client.factory.create("CxWSReportRequest") + CxReportRequest.ScanID = scanID + CxReportRequest.Type = CxWSReportType.XML + createReportResponse = self.client.service.CreateScanReport(self.sessionId, CxReportRequest) + + if createReportResponse.IsSuccesfull: + + if self.DEBUG: + print createReportResponse + print "Success. Creating Get Scan Report Status" + + inc = 0 + while inc < self.ttlReport: + inc += 1 + reportStatusResponse = self.client.service.GetScanReportStatus(self.sessionId, createReportResponse.ID) + if reportStatusResponse.IsSuccesfull and reportStatusResponse.IsReady: + break + + if self.DEBUG: + print "fail" + time.sleep(self.timeWaitReport) + + if self.DEBUG: + print "Sucess. 
Creating Get Scan Report" + responseScanResults = self.client.service.GetScanReport(self.sessionId, createReportResponse.ID ) + + if responseScanResults.IsSuccesfull and responseScanResults.ScanResults: + + XMLData = base64.b64decode(responseScanResults.ScanResults) + fileObj = open(fileName,"w+") + fileObj.write(XMLData) + fileObj.close() + # + # Get data from the Projects + # + def getProjectScannedDisplayData(self, filterOn=False): + tmp = self.client.service.GetProjectScannedDisplayData(self.sessionId) + + if not tmp.IsSuccesfull: + raise Exception("Unable to get data from the server.") + + if self.DEBUG: + print dir(tmp) + + if not filterOn: + return self.convertToJson(tmp) + else: + return tmp.ProjectScannedList[0] + + # + # Get Project Display Data + # + def getProjectsDisplayData(self, filterOn=False): + tmp = self.client.service.GetProjectsDisplayData(self.sessionId) + + if not tmp.IsSuccesfull: + raise Exception("Unable to get data from the server.") + + if self.DEBUG: + print dir(tmp) + + if not filterOn: + return self.convertToJson(tmp) + else: + return tmp.projectList[0] + + # + # Get Scan Info For All Projects + # + def getScanInfoForAllProjects(self, filterOn=False): + tmp = self.client.service.GetScansDisplayDataForAllProjects(self.sessionId) + if not tmp.IsSuccesfull: + raise Exception("Unable to get data from the server.") + + if self.DEBUG: + print dir(tmp) + + + if not filterOn: + return self.convertToJson(tmp) + else: + return tmp + + # + # Get Preset List + # + def getPresetList(self): + tmp = self.client.service.GetPresetList(self.sessionId) + + if not tmp.IsSuccesfull: + raise Exception("Unable to get data from the server.") + + if self.DEBUG: + print dir(tmp) + + return self.convertToJson(tmp) + + # + # Get Configuration List + # + def getConfigurationList(self): + tmp = self.client.service.GetConfigurationSetList(self.sessionId) + + if not tmp.IsSuccesfull: + raise Exception("Unable to get data from the server.") + + if self.DEBUG: + print dir(tmp) + + return self.convertToJson(tmp) + + # + # Get Associated Groups List + # + def getAssociatedGroups(self): + tmp = self.client.service.GetAssociatedGroupsList(self.sessionId) + + if not tmp.IsSuccesfull: + raise Exception("Unable to get data from the server.") + + if self.DEBUG: + print dir(tmp) + + return self.convertToJson(tmp) + + # + # Filter For [getProjectScannedDisplayData] + # + def filterProjectScannedDisplayData(self, projectID): + tmpProjects = self.getProjectScannedDisplayData(True) + for project in tmpProjects: + if project.ProjectID == projectID: + return self.convertToJson(project) + + raise Exception("Could not find ProjectID: %s " % projectID) + + # + # Filter for [getProjectsDisplayData] + # + def filterProjectsDisplayData(self,projectID): + tmpProjects = self.getProjectsDisplayData(True) + for project in tmpProjects: + if project.projectID == projectID: + return self.convertToJson(project) + + raise Exception("Could not find ProjectID: %s " % projectID) + + # + # Filter for [getScanInfoForAllProjects] + # + def filterScanInfoForAllProjects(self,projectID): + tmpProjects = self.getScanInfoForAllProjects(True).ScanList[0] + for project in tmpProjects: + if project.ProjectId == projectID: + return self.convertToJson(project) + + raise Exception("Could not find ProjectID: %s " % projectID) + + # + # Get Suppressed Issues + # + def getSupressedIssues(self, scanID): + CxWSReportType = self.client.factory.create("CxWSReportType") + CxReportRequest = self.client.factory.create("CxWSReportRequest") + 
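+        #Same report flow as getXMLReport above: CreateScanReport, poll GetScanReportStatus up to ttlReport times
+        #(sleeping timeWaitReport seconds between polls), then fetch the report with GetScanReport and base64-decode ScanResults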
CxReportRequest.ScanID = scanID + CxReportRequest.Type = CxWSReportType.XML + createReportResponse = self.client.service.CreateScanReport(self.sessionId, CxReportRequest) + + print createReportResponse + + if createReportResponse.IsSuccesfull: + + if self.DEBUG: + print createReportResponse + print "Success. Creating Get Scan Report Status" + + inc = 0 + while inc < self.ttlReport: + inc += 1 + reportStatusResponse = self.client.service.GetScanReportStatus(self.sessionId, createReportResponse.ID) + if reportStatusResponse.IsSuccesfull and reportStatusResponse.IsReady: + break + + if self.DEBUG: + print "fail" + time.sleep(self.timeWaitReport) + + if self.DEBUG: + print "Sucess. Creating Get Scan Report" + responseScanResults = self.client.service.GetScanReport(self.sessionId, createReportResponse.ID ) + + if responseScanResults.IsSuccesfull and responseScanResults.ScanResults: + + XMLData = base64.b64decode(responseScanResults.ScanResults) + print XMLData + issues = re.findall('FalsePositive="([a-zA-Z]+)" Severity="([a-zA-Z]+)"', XMLData) + + if self.DEBUG: + print responseScanResults + print issues + + mediumSupressIssues = 0 + lowSupressIssues = 0 + highSupressIssues = 0 + otherSupressIssues = 0 + + for a,b in issues: + if a == "True": + if b == "Medium": + mediumSupressIssues += 1 + elif b == "High": + highSupressIssues += 1 + elif b == "Low": + lowSupressIssues += 1 + else: + otherSupressIssues += 1 + if self.DEBUG: + print highSupressIssues + print mediumSupressIssues + print lowSupressIssues + return {"highSupressIssues": highSupressIssues, "mediumSupressIssues": mediumSupressIssues, "lowSupressIssues": lowSupressIssues} + else: + raise Exception("Unable to Get Report") + + else: + raise Exception("Unable to get Supressed") + + # + # Convert Suds object into serializable format. 
+ # + def recursive_asdict(self,d): + out = {} + for k, v in asdict(d).iteritems(): + if hasattr(v, '__keylist__'): + out[k] = self.recursive_asdict(v) + elif isinstance(v, list): + out[k] = [] + for item in v: + if hasattr(item, '__keylist__'): + out[k].append(self.recursive_asdict(item)) + else: + out[k].append(item) + else: + out[k] = v + return out + + + # + # Return Subs Object into Serializable format Handler + # + def convertToJson(self, data): + try: + tmp = self.recursive_asdict(data) + return json.dumps(tmp) + except Exception as e: + raise Exception("Unable to convert to JSON: %s" % e.message) diff --git a/tools/checkmarx/ScanProject.py b/tools/checkmarx/ScanProject.py new file mode 100644 index 0000000..6bcaac0 --- /dev/null +++ b/tools/checkmarx/ScanProject.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +"""ScanProject.py: Scans a project given a source folder and Checkmarx project""" + +__author__ = "Aaron Weaver" +__copyright__ = "Copyright 2018, Aaron Weaver" + +import PyCheckmarx +import argparse + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--project', help='Checkmarx Project ID for scanning', required=True) + parser.add_argument('--source', help='Source code directory', required=True) + parser.add_argument('--report', help='Name of report', required=True) + parser.add_argument('--url', help='Checkmarx URL', required=True) + parser.add_argument('--username', help='Checkmarx username', required=True) + parser.add_argument('--password', help='Checkmarx password', required=True) + args = parser.parse_args() + + pyC = PyCheckmarx.PyCheckmarx(args.username, args.password, args.url) + runID = pyC.scanExistingProject(args.project, args.source) + scanID = pyC.getStatusOfSingleScan(runID) + pyC.getXMLReport(scanID, args.report) diff --git a/tools/checkmarx/config.yaml b/tools/checkmarx/config.yaml new file mode 100644 index 0000000..4810d0a --- /dev/null +++ b/tools/checkmarx/config.yaml @@ -0,0 +1,61 @@ +checkmarx: + version: AppSecPipeline 0.5.0 + tags: + - "Static Scanner" + type: "static" + name: "checkmarx" + icon-sm: + icon-lg: + description: "Checkmarx is a source code analysis product that allows organization to scan uncompiled code and identify vulnerabilities." + url: https://www.checkmarx.com/technology/static-code-analysis-sca/ + documentation: https://checkmarx.atlassian.net/wiki/spaces/KC/overview + docker: appsecpipeline/base + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." + CHECKMARX_PROJECT: + type: runtime + data_type: int + description: "Checkmarx Project ID" + CHECKMARX_URL: + type: config + data_type: url + description: "Checkmarx web URL." + CHECKMARX_USERNAME: + type: config + data_type: username + description: "Checkmarx username." + CHECKMARX_PASSWORD: + type: config + data_type: password + description: "Checkmarx password." 
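+  #How these parameters are resolved (a sketch of the launch.py convention; values below are placeholders):
+  #  type: runtime parameters arrive on the launch.py command line as NAME=value pairs,
+  #  e.g. launch.py -t checkmarx -p all LOC=/opt/appsecpipeline/source CHECKMARX_PROJECT=42
+  #  type: config parameters (URL, username, password) are decrypted from the --auth file
+  #  using the Fernet key supplied with --key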
+ commands: + pre: + exec: "python /usr/bin/appsecpipeline/tools/checkmarx/ScanProject.py --url $CHECKMARX_URL --username $CHECKMARX_USERNAME --password $CHECKMARX_PASSWORD --project=$CHECKMARX_PROJECT --source=$LOC" + shell: False + report: "--report {reportname}" + reportname: "{timestamp}.xml" + post: + junit: + languages: + - "ruby" + - "java" + - "c#.net" + - "php" + - "python" + - "groovy" + - "android" + - "ios" + - "html5" + - "c++" + - "vb.net" + - "vb" + - "pl/sql" + - "perl" + - "apex" + - "scala" + - "swift" + profiles: + all: " " diff --git a/tools/checkmarx/requirements.txt b/tools/checkmarx/requirements.txt new file mode 100644 index 0000000..19ba88a --- /dev/null +++ b/tools/checkmarx/requirements.txt @@ -0,0 +1,19 @@ +certifi==2017.7.27.1 +chardet==3.0.4 +click==6.7 +decorator==4.1.2 +Flask==0.12.2 +funcsigs==1.0.2 +idna==2.6 +itsdangerous==0.24 +Jinja2==2.9.6 +MarkupSafe==1.0 +mock==2.0.0 +pbr==3.1.1 +requests==2.18.4 +six==1.11.0 +stashy==0.3 +suds==0.4 +urllib3==1.22 +Werkzeug==0.12.2 +PyGithub==1.35 diff --git a/tools/cloc/config.yaml b/tools/cloc/config.yaml new file mode 100644 index 0000000..28ada0f --- /dev/null +++ b/tools/cloc/config.yaml @@ -0,0 +1,26 @@ +cloc: + tags: + - "Static Code Analyzer" + type: "code-analyzer" + version: AppSecPipeline 0.5.0 + icon-sm: + icon-lg: + description: "cloc counts blank lines, comment lines, and physical lines of source code in many programming languages." + url: https://github.com/AlDanial/cloc + documentation: https://github.com/AlDanial/cloc + docker: "appsecpipeline/base-tools" + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." + commands: + parameters: "LOC : File or folder location of source code to examine. LOC=/temp/code" + pre: + exec: "cloc" + report: "--report-file={reportname}" + reportname: "languages.json" + post: "cat {reportname}" + junit: + profiles: + all: "$LOC --json" diff --git a/tools/defectdojo/.DS_Store b/tools/defectdojo/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..8388a30497810f1fc634e43af8cbec0c4d795e60 GIT binary patch literal 8196 zcmZQzU|@7AO)+F(kYHe7;9!8z0^AH(0Z1N%F(jFwB5WY@7#IW?7>XE57>bk1iwlx+ zpmL+sXb6mkz-S1JhQMeDjE2DA3IRrlb2xC+`w-btax?@+LtsRP0H}OWfVAxy9H4Xq zga%15FfuTJy8w&~3@oq!Vg&aC7(j9$tsokt6-0xyGBAQzU^Bp485p5j8NuBUkUjyh zQH%^=?F@`yn?d4W?F@`yn;94wA=()jp*Ax@dnk+$?F@_%?F@`y+rdVT5~Cq78Ukn` z0P4H5GZZnTG88c6Blq9)lNcBnP9_!PWEPhg7+hy$VrF4wW9Q)H<lyAxhz-ujFApwB zEGaE^N-T;7^D<LXlc4O7{QMj^J25FNGqpTkK*TveuOu-uFSQ6sc}#d_UP^wsQ+{b) zN-@}+fW(rFq{JeyMeza>)zv2EhB^v{riQgT3e^@yCOQgchDNou930}RhPIvwxs_Gb zHMMm!A>M+O4h;MdKAfDzz`%g!&79&g=cF8vH<=1y-n@bECbyWIh`UF;fIw++YEWuU zVo7FMs!L)?YH_>(e{n{Bxo2{IUO-W5S!QZ^cyfMDX>ML|yntYGVoq{tPGU)_V@^(R zW>so&L`iBz35XR{oL^Mp3|1JCnVg>o5{nlQ0_pcJD9OyvD-KI7D$dN$<K$e;BPu1Y ztf6aUZsT}`mych7Q;1U}yezXgGbtxEv^+DVBqK7hI5{;hB{MHQDl;WDuOu@qGqs3Q z;3sEDUSe)4Sd*Bzm?VdQGDk#dQf6UkW=SPjlv74bj#FSOM@&j$Nvdyt3P@uLSVl}n zOo@Xd8YJ#ql$uzQ3K8a16;tOFNaP49&a8qM%&Enx!zobA8Iqlu2T>!Y&uPdZkir?7 zlbDp612N5n(~LvFn=`x&;wy-tC8srqAfpavL~?#^ZfaghF+|u-%z;DT1V<dmZlA>B z690nKJgD_zGGZ<q9FZUy$CQ-R6b@wv4M?miI~YJ10-(A>l0k>Tj=_x~iXoXHkD;8Q zhoP6Dk6{+WVusZW8yR*p9Ah}naFXE^!+C}q3^y5WGu&Z#%J7ciJ;O(aPYnMVIT$$^ zxfsP5<rx(il^B&7H5d&UjTlWBO&P5iof%yi-5A{&eHp_T!x<wPqZpGIvl(+3^BD^m zD;b*@n;Baf+Zd-Z&SRX<xPWmL<7&n&jC&z2V}e#z44x1^jAY!8H8~bQlVcdR<S6SV z;;u3v$?+x+C=Ch<35$rzDQoB$npr!*;}0#$P*N3gQUa$QNb&$D8(6Y{Bv&zU2}vnw zPTA1Ryp+@m^b`(D`2y^qq{DFqq*`7<R#pI%W*}S@Ras5}MuDvyoM2f^Rasd9CJu<4 zuAVHX0JA_MRL;nlQ&xagAcYemYi_~GDZnC7%n8wHW6LQkz$V}gm34ID<P=~K_{qtM 
zCEYP_Fknx03mH~1>|{8`aEakM!(E0)46hj8GyGuq$H>CS$0)`q!6?lr!zjzB#;C!l z#i-4w!)VHA&S=SK#b^ypX&#J$jKPed;PjTjn8cXEn97*On9o?qSj<?$Sjt$(*Z@v< zEsVX4{frYCCoxWDoWeL4lDgn&&I7`Sk&MeQ(_C3_QC?1dUOKov#=yYJP{NSUkk60< z8^Z+^j>D@CK=LA*=V9SAf@o%lUnM~Oe^&+uT;u-`Rior+2#kinunYl47MEZbCuk*r z&4-}AHmE<H0F?waYe3y`Mo_;Tq7S45)U^k9#~C4$ieS}@43M5YBLk!-&&a?4(mHSh b0i*Rlyr+j&1tL|BkU8AZ`X8Kxha~|3huKgX literal 0 HcmV?d00001 diff --git a/tools/defectdojo/config.yaml b/tools/defectdojo/config.yaml new file mode 100644 index 0000000..0bfada5 --- /dev/null +++ b/tools/defectdojo/config.yaml @@ -0,0 +1,53 @@ +defectdojo: + version: AppSecPipeline 0.5.0 + name: DefectDojo + tags: + - "Vulnerability Management" + - "Report Consumer" + type: "collector" + description: "DefectDojo is a security program and vulnerability management tool. DefectDojo allows you to manage your application security program, maintain product and application information, schedule scans, triage vulnerabilities and push findings into defect trackers. Consolidate your findings into one source of truth with DefectDojo." + docker: "appsecpipeline/base-tools" + url: https://github.com/OWASP/django-DefectDojo + documentation: http://defectdojo.readthedocs.io/en/latest/ + parameters: + DOJO_ENGAGEMENT_ID: + type: runtime + data_type: int + description: "Engagement id that exists in DefectDojo." + DOJO_DIR: + type: runtime + data_type: string + description: "Directory where the report reside in for importing into DefectDojo." + BUILD_ID: + type: runtime + data_type: string + description: "Build ID from upstream CI/CD." + DOJO_API_KEY: + type: config + data_type: key + description: "Dojo API key." + DOJO_HOST: + type: config + data_type: string + description: "DefectDojo host." + DOJO_PRODUCT_ID: + type: runtime + data_type: int + description: "DefectDojo product id." + DOJO_PROXY: + type: config + data_type: url + description: "Optional proxy for connecting to DefectDojo." 
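+  #Illustrative invocation (IDs and paths are placeholders): run after the scanners so their
+  #reports are waiting under DOJO_DIR, e.g.
+  #  launch.py -t defectdojo -p all DOJO_DIR=/opt/appsecpipeline/reports DOJO_PRODUCT_ID=1 BUILD_ID=12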
+ commands: + pre: + exec: "python /usr/bin/appsecpipeline/tools/defectdojo/dojo_ci_cd.py --dir=$DOJO_DIR --api_key=$DOJO_API_KEY --host=$DOJO_HOST --product=$DOJO_PRODUCT_ID" + shell: False + post: + report: + reportname: + junit: + profiles: + all: "--build_id=$BUILD_ID --closeengagement" + close_engagement: "--engagement=$DOJO_ENGAGEMENT_ID --closeengagement" + engagement: "--engagement=$DOJO_ENGAGEMENT_ID" + all_proxy: "--proxy=$DOJO_PROXY --build_id=$BUILD_ID" diff --git a/tools/defectdojo/dojo_ci_cd.py b/tools/defectdojo/dojo_ci_cd.py new file mode 100644 index 0000000..ec01407 --- /dev/null +++ b/tools/defectdojo/dojo_ci_cd.py @@ -0,0 +1,344 @@ +""" +Example written by Aaron Weaver <aaron.weaver@owasp.org> +as part of the OWASP DefectDojo and OWASP AppSec Pipeline Security projects + +Description: CI/CD example for DefectDojo +""" +from defectdojo_api import defectdojo +from datetime import datetime, timedelta +import os, sys +import argparse +import time +import junit_xml_output +import shutil + +DEBUG = True + +test_cases = [] + +def junit(toolName, file): + + junit_xml = junit_xml_output.JunitXml(toolName, test_cases, total_tests=None, total_failures=None) + with open(file, 'w') as file: + print "\nWriting Junit test file: junit_dojo.xml" + file.write(junit_xml.dump()) + +def dojo_connection(host, api_key, user, proxy=None): + + if proxy is not None: + proxies = { + 'http': 'http://' + proxy, + 'https': 'http://' + proxy, + } + print proxy + # Instantiate the DefectDojo api wrapper + dd = defectdojo.DefectDojoAPI(host, api_key, user, proxies=proxies, verify_ssl=False, timeout=360, debug=True) + else: + dd = defectdojo.DefectDojoAPI(host, api_key, user, verify_ssl=False, timeout=360, debug=False) + + return dd + +def return_engagement(dd, product_id, user, build_id=None): + engagement_id = None + #Specify the product id + product_id = product_id + user_id = None + start_date = datetime.now() + end_date = start_date+timedelta(days=1) + + users = dd.list_users(user) + + if users.success == False: + print "Error in listing users: " + users.message + print "Exiting...\n" + sys.exit() + else: + user_id = users.data["objects"][0]["id"] + + engagementText = "CI/CD Integration" + if build_id is not None: + engagementText = engagementText + " - Build #" + build_id + + engagement_id = dd.create_engagement(engagementText, product_id, str(user_id), + "In Progress", start_date.strftime("%Y-%m-%d"), end_date.strftime("%Y-%m-%d")) + + print "Engagement ID created: " + str(engagement_id) + + return engagement_id + +def process_findings(dd, engagement_id, dir, build=None): + test_ids = [] + for root, dirs, files in os.walk(dir): + for name in files: + file = os.path.join(os.getcwd(),root, name) + if "processed" not in str(file) and "error" not in str(file): + #Test for file extension + if file.lower().endswith(('.json', '.csv','.txt','.js', '.xml')): + test_id = processFiles(dd, engagement_id, file) + + if test_id is not None: + if str(test_id).isdigit(): + test_ids.append(str(test_id)) + else: + print "Skipped file, extension not supported: " + file + "\n" + return ','.join(test_ids) + +def moveFile(file, success): + path = os.path.dirname(file) + name = os.path.basename(file) + dest = None + + #folder for processed files + processFolder = os.path.join(path,"processed") + if not os.path.exists(processFolder): + os.mkdir(processFolder) + + #folder for error file + errorFolder = os.path.join(path,"error") + if not os.path.exists(errorFolder): + os.mkdir(errorFolder) + + if success == True: + dest = 
os.path.join(path,processFolder,name) + else: + dest = os.path.join(path,errorFolder,name) + + shutil.move(file, dest) + +def processFiles(dd, engagement_id, file, scanner=None, build=None): + upload_scan = None + scannerName = None + path=os.path.dirname(file) + name = os.path.basename(file) + tool = os.path.basename(path) + tool = tool.lower() + + test_id = None + date = datetime.now() + dojoDate = date.strftime("%Y-%m-%d") + + #Tools without an importer in Dojo; attempted to import as generic + if "generic" in name: + scanner = "Generic Findings Import" + print "Uploading " + tool + " scan: " + file + test_id = dd.upload_scan(engagement_id, scanner, file, "true", dojoDate, build) + if test_id.success == False: + print "An error occured while uploading the scan: " + test_id.message + moveFile(file, False) + else: + print "Succesful upload, TestID: " + str(test_id) + "\n" + moveFile(file, True) + else: + if tool == "burp": + scannerName = "Burp Scan" + elif tool == "nessus": + scannerName = "Nessus Scan" + elif tool == "nmap": + scannerName = "Nmap Scan" + elif tool == "nexpose": + scannerName = "Nexpose Scan" + elif tool == "veracode": + scannerName = "Veracode Scan" + elif tool == "checkmarx": + scannerName = "Checkmarx Scan" + elif tool == "zap": + scannerName = "ZAP Scan" + elif tool == "appspider": + scannerName = "AppSpider Scan" + elif tool == "arachni": + scannerName = "Arachni Scan" + elif tool == "vcg": + scannerName = "VCG Scan" + elif tool == "dependency-check": + scannerName = "Dependency Check Scan" + elif tool == "retirejs": + scannerName = "Retire.js Scan" + elif tool == "nodesecurity": + scannerName = "Node Security Platform Scan" + elif tool == "qualys": + scannerName = "Qualys Scan" + elif tool == "qualyswebapp": + scannerName = "Qualys Webapp Scan" + elif tool == "openvas": + scannerName = "OpenVAS CSV" + elif tool == "snyk": + scannerName = "Snyk Scan" + else: + print "Tool not defined in dojo_ci_cd script: " + tool + + if scannerName is not None: + print "Uploading " + scannerName + " scan: " + file + test_id = dd.upload_scan(engagement_id, scannerName, file, "true", dojoDate, build) + if test_id.success == False: + print "An error occured while uploading the scan: " + test_id.message + moveFile(file, False) + else: + print "Succesful upload, TestID: " + str(test_id) + moveFile(file, True) + + return test_id + +def create_findings(dd, engagement_id, scanner, file, build=None): + # Upload the scanner export + if engagement_id > 0: + print "Uploading scanner data." 
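+        #upload_scan returns a response wrapper: .success is False on an API error, .message carries the reason, and .id() yields the created test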
+ date = datetime.now() + + upload_scan = dd.upload_scan(engagement_id, scanner, file, "true", date.strftime("%Y-%m-%d"), build=build) + + if upload_scan.success: + test_id = upload_scan.id() + else: + print upload_scan.message + quit() + +def summary(dd, engagement_id, test_ids, max_critical=0, max_high=0, max_medium=0): + findings = dd.list_findings(engagement_id_in=engagement_id, duplicate="false", active="true", verified="true") + if findings.success: + print"==============================================" + print "Total Number of Vulnerabilities: " + str(findings.data["meta"]["total_count"]) + print"==============================================" + print_findings(sum_severity(findings)) + print + else: + print "An error occurred: " + findings.message + + findings = dd.list_findings(test_id_in=test_ids, duplicate="true") + + if findings.success: + print"==============================================" + print "Total Number of Duplicate Findings: " + str(findings.data["meta"]["total_count"]) + print"==============================================" + print_findings(sum_severity(findings)) + print + """ + #Delay while de-dupes + sys.stdout.write("Sleeping for 30 seconds to wait for dedupe celery process:") + sys.stdout.flush() + for i in range(15): + time.sleep(2) + sys.stdout.write(".") + sys.stdout.flush() + """ + else: + print "An error occurred: " + findings.message + + findings = dd.list_findings(test_id_in=test_ids, duplicate="false", limit=500) + + if findings.success: + if findings.count() > 0: + for finding in findings.data["objects"]: + test_cases.append(junit_xml_output.TestCase(finding["title"] + " Severity: " + finding["severity"], finding["description"],"failure")) + #if not os.path.exists("reports"): + # os.mkdir("reports") + #junit("DefectDojo", "reports/junit_dojo.xml") + + print"\n==============================================" + print "Total Number of New Findings: " + str(findings.data["meta"]["total_count"]) + print"==============================================" + sum_new_findings = sum_severity(findings) + print_findings(sum_new_findings) + print + print"==============================================" + + strFail = None + if max_critical is not None: + if sum_new_findings[4] > max_critical: + strFail = "Build Failed: Max Critical" + if max_high is not None: + if sum_new_findings[3] > max_high: + strFail = strFail + " Max High" + if max_medium is not None: + if sum_new_findings[2] > max_medium: + strFail = strFail + " Max Medium" + if strFail is None: + print "Build Passed!" 
+ else: + print "Build Failed: " + strFail + print"==============================================" + else: + print "An error occurred: " + findings.message + +def sum_severity(findings): + severity = [0,0,0,0,0] + for finding in findings.data["objects"]: + if finding["severity"] == "Critical": + severity[4] = severity[4] + 1 + if finding["severity"] == "High": + severity[3] = severity[3] + 1 + if finding["severity"] == "Medium": + severity[2] = severity[2] + 1 + if finding["severity"] == "Low": + severity[1] = severity[1] + 1 + if finding["severity"] == "Info": + severity[0] = severity[0] + 1 + + return severity + +def print_findings(findings): + print "Critical: " + str(findings[4]) + print "High: " + str(findings[3]) + print "Medium: " + str(findings[2]) + print "Low: " + str(findings[1]) + print "Info: " + str(findings[0]) + +class Main: + if __name__ == "__main__": + parser = argparse.ArgumentParser(description='CI/CD integration for DefectDojo') + parser.add_argument('--host', help="Dojo Hostname", required=True) + parser.add_argument('--api_key', help="API Key: user:guidvalue", required=True) + parser.add_argument('--product', help="Dojo Product ID", required=True) + parser.add_argument('--file', help="Scanner file", required=False) + parser.add_argument('--dir', help="Scanner directory, needs to have the scanner name with the scan file in the folder. Ex: reports/nmap/nmap.csv", required=False, default="reports") + parser.add_argument('--scanner', help="Type of scanner", required=False) + parser.add_argument('--build_id', help="Build ID", required=False) + parser.add_argument('--engagement', help="Engagement ID (optional)", required=False) + parser.add_argument('--closeengagement', help="Close Engagement", required=False, action='store_true') + parser.add_argument('--critical', help="Maximum new critical vulns to pass the build.", required=False) + parser.add_argument('--high', help="Maximum new high vulns to pass the build.", required=False) + parser.add_argument('--medium', help="Maximum new medium vulns to pass the build.", required=False) + parser.add_argument('--proxy', help="Proxy, specify as host:port, ex: localhost:8080") + + #Parse arguments + args = vars(parser.parse_args()) + host = args["host"] + api_key = args["api_key"] + + product_id = args["product"] + file = args["file"] + dir = args["dir"] + scanner = args["scanner"] + engagement_id = args["engagement"] + closeEngagement = args["closeengagement"] + max_critical = args["critical"] + max_high = args["high"] + max_medium = args["medium"] + build_id = args["build_id"] + proxy = args["proxy"] + + if dir is not None or file is not None: + if ":" not in api_key: + print "API Key not in the correct format, must be: <user>:<guid>" + quit() + apiParsed = api_key.split(':') + user = apiParsed[0] + api_key = apiParsed[1] + dd = dojo_connection(host, api_key, user, proxy) + if engagement_id is None: + engagement_id = return_engagement(dd, product_id, user, build_id=build_id) + test_ids = None + if file is not None: + if scanner is not None: + test_ids = processFiles(dd, engagement_id, file, scanner=scanner) + else: + print "Scanner type must be specified for a file import. --scanner" + else: + test_ids = process_findings(dd, engagement_id, dir, build_id) + + #Close the engagement + if closeEngagement == True: + dd.close_engagement(engagement_id) + + summary(dd, engagement_id, test_ids, max_critical, max_high, max_medium) + else: + print "No file or directory to scan specified." 
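The threshold check in summary() is the piece most worth lifting out when wiring this into a different CI system. A minimal sketch of the same gate (the severity list layout matches sum_severity(): [info, low, medium, high, critical]; the function name is illustrative, not part of the script):

def gate_build(severity, max_critical=0, max_high=None, max_medium=None):
    #severity is the [info, low, medium, high, critical] count list from sum_severity()
    failures = []
    if max_critical is not None and severity[4] > int(max_critical):
        failures.append("Max Critical")
    if max_high is not None and severity[3] > int(max_high):
        failures.append("Max High")
    if max_medium is not None and severity[2] > int(max_medium):
        failures.append("Max Medium")
    return "Build Passed!" if not failures else "Build Failed: " + " ".join(failures)

Collecting failures in a list also sidesteps a wrinkle in summary() as written: strFail starts as None, so a run that passes the critical gate but trips the high or medium gate concatenates None + " Max High" and raises a TypeError. The int() casts matter too, since argparse delivers --critical/--high/--medium as strings and a Python 2 int-versus-str comparison is always False, which would leave the gates permanently open.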
diff --git a/tools/dependency-check/config.yaml b/tools/dependency-check/config.yaml new file mode 100644 index 0000000..0ac315d --- /dev/null +++ b/tools/dependency-check/config.yaml @@ -0,0 +1,35 @@ +dependency-check: + version: AppSecPipeline 0.5.0 + tags: + - "Components with known Vulnerabilities" + type: "static" + description: "Dependency-Check is a utility that identifies project dependencies and checks if there are any known, publicly disclosed, vulnerabilities. Currently Java and .NET are supported; additional experimental support has been added for Ruby, Node.js, Python, and limited support for C/C++ build systems (autoconf and cmake)." + docker: "appsecpipeline/sast" + url: https://www.owasp.org/index.php/OWASP_Dependency_Check + documentation: https://jeremylong.github.io/DependencyCheck/ + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." + PROJECT: + type: runtime + data_type: string + description: "Name of the Dependency project." + commands: + pre: + exec: "/usr/bin/dependency-check/bin/dependency-check.sh" + shell: False + post: + report: "--out {reportname} --format XML" + reportname: "{timestamp}.xml" + junit: + languages: + - "java" + - "nodejs" + - "ruby" + - ".net" + - "python" + profiles: + #Runs the full dependency scan, only updates every week + all: "--project $PROJECT --scan $LOC --cveValidForHours 168" diff --git a/tools/git/.DS_Store b/tools/git/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmZQzU|@7AO)+F(5MW?n;9!8z45|!R0Z1N%F(jFgL>QrFAPJ2!M?+vV1V%$(Gz3ON zU^D~<VF)ln+{D2Rp-0Kl5Eu=C(GY-#0H}OW0QD6Z7#JL&bOVG2Nii@oFo3%Nj0_Ac zFio(203!nfNGnJUNGpg2X=PvpvA|}4wK6b5wK9UcAq)(R;4TS>25V<v1ltVagS9g- zf^BACV1#IAV1(Mt2<@RTf_gL{^C8+97{Ru~TsKOOhQMeDz(Rl-!Vmz}|E>%SxcdJP zRior+2#kinunYl47MEZbCs3t{!+W4QHvuXKVuPw;Mo^s$(F3lEVT}ML$bg~*R5_@+ b2Uo?6kTwK}57Iu`5P${HC_Nei0}uiLNUI8I literal 0 HcmV?d00001 diff --git a/tools/git/config.yaml b/tools/git/config.yaml new file mode 100644 index 0000000..e91d895 --- /dev/null +++ b/tools/git/config.yaml @@ -0,0 +1,33 @@ +git: + version: AppSecPipeline 0.5.0 + tags: + - "Utility" + type: "utility" + description: "Git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency." + docker: "appsecpipeline/base" + url: https://git-scm.com/ + documentation: https://git-scm.com/docs/git + parameters: + GIT_URL: + type: runtime + data_type: url + description: "URL of the source code repository." + LOC: + type: runtime + data_type: string + description: "Location of the source code." + GIT_TAGS: + type: runtime + data_type: string + description: "Checkout a specified tag." 
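+  #The profile strings below are handed to git.sh as positional arguments:
+  #  $1=action (clone or tag), $2=repository URL, $3=destination directory, $4=tag to check out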
+  commands:
+    pre:
+    exec: "sh /usr/bin/appsecpipeline/tools/git/git.sh"
+    shell: False
+    post:
+    report:
+    reportname:
+    junit:
+  profiles:
+    clone: "clone $GIT_URL $LOC"
+    tags: "tag $GIT_URL $LOC $GIT_TAGS"
diff --git a/tools/git/git.sh b/tools/git/git.sh
new file mode 100644
index 0000000..028e26b
--- /dev/null
+++ b/tools/git/git.sh
@@ -0,0 +1,12 @@
+action=$1
+repo=$2
+dest=$3
+tag=$4
+
+git clone $repo $dest
+cd $dest
+
+if [ $action = "tag" ]; then
+  git fetch --all
+  #Check out the requested tag ($4); $3 is the destination directory
+  git checkout $tag
+fi
diff --git a/tools/health.py b/tools/health.py
new file mode 100644
index 0000000..13b88e4
--- /dev/null
+++ b/tools/health.py
@@ -0,0 +1,24 @@
+import re
+from datetime import datetime
+import subprocess
+import shlex
+import sys
+
+def minutes_between(dateCompare):
+    d1 = datetime.strptime(dateCompare, "%Y-%m-%d %H:%M:%S")
+    #total_seconds() so containers up for more than a day do not wrap back to zero
+    return abs((datetime.now() - d1).total_seconds()/60)
+
+age = 0
+
+uptime = subprocess.check_output(shlex.split("stat /proc/1/cmdline"))
+
+for line in uptime.splitlines():
+    dockerStartTime = re.search("Access\:\s(\d{1,4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})", line)
+
+    if dockerStartTime:
+        age = minutes_between(dockerStartTime.group(1))
+        break
+
+#Make configurable at some point; terminate if the container has been up longer than 12 hours (720 minutes)
+if age > 720:
+    sys.exit(1)
diff --git a/tools/junit.py b/tools/junit.py
new file mode 100755
index 0000000..5a54b46
--- /dev/null
+++ b/tools/junit.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+import xml.etree.ElementTree as ET
+import csv
+from datetime import datetime
+import re
+import argparse
+import os
+import junit_xml_output
+
+test_cases = []
+
+def junit(toolName, file):
+
+    junit_xml = junit_xml_output.JunitXml(toolName, test_cases, total_tests=None, total_failures=None)
+    with open(file, 'w') as file:
+        print "Writing Junit test files"
+        file.write(junit_xml.dump())
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+
+    #Command line options
+    parser.add_argument("-t", "--tool", help="Tool name", required=True)
+    parser.add_argument("-f", "--file", help="File to process", required=True)
+    args = parser.parse_args()
+
+    test_cases = []
+    TITLE = 1
+    DESCRIPTION = 5
+    base = os.path.basename(args.file)
+    fileName = os.path.join(os.path.dirname(args.file), "generic_" + os.path.splitext(base)[0] + ".csv")
+    csvToParse = fileName
+
+    #Test for the generic CSV produced by the tool's parser (pass the variable, not the literal string)
+    if os.path.isfile(csvToParse):
+        with open(csvToParse, 'rb') as csvfile:
+            reader = csv.reader(csvfile, delimiter=',')
+            first = True
+            for row in reader:
+                if first:
+                    first = False
+                else:
+                    #Output a junit test file; should lows/mediums be considered a failure?
+                    test_cases.append(junit_xml_output.TestCase(row[TITLE], row[DESCRIPTION], "failure"))
+
+        junit(args.tool, os.path.join(os.path.dirname(args.file), "junit", "junit_" + os.path.splitext(base)[0] + ".xml"))
+    else:
+        print "File passed in doesn't exist."
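Both junit.py above and dojo_ci_cd.py drive junit_xml_output the same way: collect TestCase objects, wrap them in JunitXml, and write out dump(). A minimal round trip under that assumption (the tool name and finding text are placeholder data; the constructor signatures mirror the calls above):

import junit_xml_output

test_cases = [junit_xml_output.TestCase("Reflected XSS Severity: High", "Parameter q is echoed unencoded.", "failure")]
junit_xml = junit_xml_output.JunitXml("nikto", test_cases, total_tests=None, total_failures=None)
with open("junit_nikto.xml", "w") as out:
    out.write(junit_xml.dump())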
diff --git a/tools/launch.py b/tools/launch.py new file mode 100644 index 0000000..97880b8 --- /dev/null +++ b/tools/launch.py @@ -0,0 +1,318 @@ +#!/usr/bin/env python +import logging + +"""Launch.py: Starts tooling based on the supplied yaml file in support of the AppSec Pipeline.""" + +__author__ = "Aaron Weaver" +__copyright__ = "Copyright 2017, Aaron Weaver" + +import yaml +import argparse +import sys +import shlex +import os +import string +import uuid +from subprocess import call +from datetime import datetime +import base64 +from cryptography.fernet import Fernet + +baseLocation = "/usr/bin/appsecpipeline/" +baseData = "/opt/appsecpipeline/" + +reportsDir = os.path.join(baseData,"reports") + +def getYamlConfig(toolName): + #Expecting config file in tools/<toolname>/config.yaml + yamlLoc = os.path.join(baseLocation, "tools",toolName,"config.yaml") + + if not os.path.exists(yamlLoc): + raise RuntimeError("Tool config does not exist. Checked in: " + yamlLoc) + + return yamlLoc + +def getParameterAttribs(toolName, command, authFile, key): + with open(authFile, 'r') as stream: + try: + #Tool configuration + config = yaml.safe_load(stream) + + #Load the key + f = Fernet(key) + + if toolName in config: + #Set the object to the tool yaml section + tool = config[toolName] + toolParms = tool["parameters"] + for parameter in toolParms: + if parameter in command: + command = command.replace("$" + parameter, f.decrypt(toolParms[parameter]["value"])) + + except yaml.YAMLError as exc: + logging.warning(exc) + + return command + +#Allow for dynamic arguments to support a wide variety of tools +#Format URL=Value, YAML Definition for substitution $URL +def substituteArgs(args, command, toolName, authFile=None, key=None): + #Replace tool credential settings if a tool config yaml exists + if authFile and key: + command = getParameterAttribs(toolName, command, authFile, key) + + for arg in args: + #print "Arguments: " + #print arg + env = arg.split("=", 1) #Only split on the first '=' + if len(env) > 1: + name = env[0] + value = env[1] + #Replace values if those values exist in the command + """ + print "name" + print name.lower() + print "value" + print value + print "Command" + print command.lower() + """ + if name.lower() in command.lower(): + + if name.startswith('--'): + name = name.replace("--","", 1) + + if name.startswith('-'): + name = name.replace("-","", 1) + + command = command.replace("$" + name, value) + print "Command replaced: " + command + + #Check if any command haven't been replaced and see if it's the app runtime config + if "$" in command: + try: + yamlLoc = os.path.join(os.path.join(reportsDir,"prepenv"),"appruntime.yaml") + logging.info("YAML file: " + yamlLoc) + logging.info(os.listdir(os.path.join(reportsDir,"prepenv"))) + + with open(yamlLoc, 'r') as stream: + try: + launchCmd = None + report = None + fullReportName = None + profile_found = False + + #Tool configuration + config = yaml.safe_load(stream) + + for item in config: + if item.lower() in command.lower(): + command = command.replace("$" + item, config[item]) + except yaml.YAMLError as exc: + logging.warning(exc) + except Exception as e: + logging.info("YAML prep file not found, skipping. 
Detail: " + str(e)) + return command + +def slugify(s): + """ + Normalizes string for foldername + """ + valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits) + filename = ''.join(c for c in s if c in valid_chars) + filename = filename.replace(' ','_') + return filename + +def reportName(toolName, reportString): + filename = None + basePath = os.path.join(reportsDir,toolName) + + if "{timestamp}" in reportString: + #Add the folder path and datetimestamps + #toolName/toolName_2017-11-02-1-05-04.csv + datestring = datetime.strftime(datetime.now(), '%Y-%m-%d-%H-%M-%S') + filename = reportString.replace("{timestamp}", os.path.join(basePath, toolName + "_" + datestring + "_" + str(uuid.uuid4()))) + else: + #If the filename is a static name + filename = os.path.join(basePath,reportString) + + return filename + +def checkFolderPath(toolName): + #Create a directory to store the reports / junit + if not os.path.exists(reportsDir): + os.mkdir(reportsDir) + + toolPath = os.path.join(reportsDir,slugify(toolName)) + + if not os.path.exists(toolPath): + os.mkdir(toolPath) + + junitPath = os.path.join(reportsDir,"junit") + if not os.path.exists(junitPath): + os.mkdir(junitPath) + +def executeTool(toolName, profile_run, credentialedScan, test_mode, auth=None, key=None): + logging.info("Tool: " + toolName) + yamlConfig = getYamlConfig(toolName) + + with open(yamlConfig, 'r') as stream: + try: + launchCmd = None + report = None + fullReportName = None + profile_found = False + + #Tool configuration + config = yaml.safe_load(stream) + + #Set the object to the tool yaml section + tool = config[toolName] + + #Tooling commands + commands = tool["commands"] + + if profile_run in tool["profiles"]: + launchCmd = tool["profiles"][profile_run] + profile_found = True + + #Launch only if command exists + if profile_found and launchCmd: + if commands["report"] is not None: + fullReportName = reportName(slugify(toolName), commands["reportname"]) + launchCmd = launchCmd + " " + commands["report"].replace("{reportname}", fullReportName) + + #Only launch command if a launch command is specified + #Pre and post require a launch command + if launchCmd: + #Create a directory to store the reports + checkFolderPath(toolName) + logging.info("Created reports folder") + #Execute a pre-commmand, such as a setup or updated requirement + if commands["pre"] is not None: + if not test_mode: + preCommands = substituteArgs(remaining_argv, commands["pre"], toolName, auth, key) + logging.info("*****************************") + logging.info("Pre-Launch: " + preCommands) + logging.info("*****************************") + call(shlex.split(preCommands)) + + launchCmd = commands["exec"] + " " + launchCmd + + #Check for credentialed scan + if credentialedScan is not None: + if "credentials" in tool: + if credentialedScan in tool["credentials"]: + launchCmd = launchCmd + " " + tool["credentials"][credentialedScan] + else: + logging.warning("Credential profile not found.") + else: + logging.warning("Credential command line option passed but no credential profile exists in config.yaml.") + + logging.info(launchCmd) + #Substitute any environment variables + launchCmd = substituteArgs(remaining_argv, launchCmd, toolName, auth, key) + logging.info("*****************************") + logging.info("Launch: " + launchCmd) + #print "Launch: " + base64.b64encode(launchCmd) + logging.info("*****************************") + #Check for any commands that have not been substituted and warn + if "$" in launchCmd: + 
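+                        #A leftover $NAME means a parameter was supplied neither on the command line nor via the auth/appruntime config; fail fast instead of launching a malformed command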
logging.warning("*****************************") + logging.info("Warning: Some commands haven't been substituted. Exiting.") + logging.info(launchCmd) + logging.info("*****************************") + sys.exit(1) + + if not test_mode: + if "shell" in commands: + if commands["shell"] == True: + logging.info("Using shell call") + call(launchCmd, shell=True) + else: + call(shlex.split(launchCmd)) + else: + call(shlex.split(launchCmd)) + + #Execute a pre-commmand, such as a setup or update requirement + if commands["post"] is not None: + #Look into making this more flexible with dynamic substitution + postCmd = commands["post"] + postCmd = postCmd.replace("{reportname}", fullReportName) + logging.info("*****************************") + logging.info("Post Command: " + postCmd) + logging.info("*****************************") + if not test_mode: + #review and see what other options we have + call(postCmd, shell=True) + #call(shlex.split(postCmd)) + if commands["junit"] is not None: + #Look into making this more flexible with dynamic substitution + junitCmd = commands["junit"] + junitCmd = junitCmd.replace("{reportname}", fullReportName) + logging.info("*****************************") + logging.info("Junit Command: " + junitCmd) + logging.info("*****************************") + if not test_mode: + #review and see what other options we have + call(shlex.split(junitCmd)) + else: + logging.warning("Profile or command to run not found in Yaml configuration file.") + except yaml.YAMLError as exc: + print(exc) + +if __name__ == '__main__': + parser = argparse.ArgumentParser(add_help=False) + #Command line options + parser.add_argument("-t", "--tool", help="Tool to Run", required=True) + parser.add_argument("-p", "--profile", help="Profile to Execute", default=None) + parser.add_argument("-c", "--credential", help="Scan with login credentials. Specify credentialed profile.", default=None) + parser.add_argument("-m", "--test", help="Run the command in test mode only, non-execution.", default=False) + parser.add_argument("-f", "--runevery", help="Runs a runevery tool after each step, for example DefectDojo.", default=False) + parser.add_argument("-fp", "--runevery-profile", help="runevery tool profile.", default=False) + parser.add_argument("-l", "--log", help="Logging level: debug, info, warning, error, critical", default="debug") + parser.add_argument("-a", "--auth", help="Tool configuration credentials and or API keys.", required=False, default=None) + parser.add_argument("-k", "--key", help="Key for decrypting configuration. 
(string)", default=None) + + args, remaining_argv = parser.parse_known_args() + + profile_run = None + profile_found = False + test_mode = False + credentialedScan = None + runeveryTool = None + runeveryProfile = None + + if args.runevery: + runeveryTool = args.runevery + + if args.runevery_profile: + runeveryProfile = args.runevery_profile + + if args.profile: + profile_run = args.profile + + if args.credential: + credentialedScan = args.credential + + if args.test: + test_mode = args.test + + if args.tool: + tool = args.tool + + loglevel = args.log + numeric_level = getattr(logging, loglevel.upper(), None) + if not isinstance(numeric_level, int): + raise ValueError('Invalid log level: %s' % loglevel) + + logfile_dir = os.path.join(baseData,"logs") + if not os.path.exists(logfile_dir): + os.mkdir(logfile_dir) + logfile_name = os.path.join(logfile_dir,tool + ".log") + logging.basicConfig(filename=logfile_name, filemode="w", level=numeric_level) + + executeTool(tool, profile_run, credentialedScan, test_mode, args.auth, args.key) + + if runeveryTool is not None: + executeTool(runeveryTool, runeveryProfile, False, test_mode, args.auth, args.key) diff --git a/tools/nikto/config.yaml b/tools/nikto/config.yaml new file mode 100644 index 0000000..30210ef --- /dev/null +++ b/tools/nikto/config.yaml @@ -0,0 +1,37 @@ +nikto: + version: AppSecPipeline 0.5.0 + tags: + - "Dyanmic Scanner" + type: "dynamic" + scan_type: "web" + icon-sm: + icon-lg: + description: "Web server scanner which performs comprehensive tests against web servers for multiple items, including over 3500 potentially dangerous files/CGIs, versions on over 900 servers, and version specific problems on over 250 servers." + url: https://cirt.net/Nikto2 + documentation: https://cirt.net/nikto2-docs/ + docker: "appsecpipeline/base-tools" + parameters: + URL: + type: runtime + data_type: url + description: "URL of the site to scan." + commands: + pre: + exec: "nikto -h $URL" + report: "-output '{reportname}'" + reportname: "{timestamp}.xml" + post: "python /usr/bin/appsecpipeline/tools/nikto/parser.py -f '{reportname}'" + junit: "junit.py -f '{reportname}' -t nikto" + profiles: + all: "" + tuned: "-Tuning x 6" + fast: "-Plugins \"headers;report_xml\"" + file_upload: "-Tuning 0" + misconfig: "-Tuning 2" + info: "-Tuning 3" + xss: "-Tuning 4" + remote: "-Tuning 57" + dos: "-Tuning 6" + command_exec: "-Tuning 8" + sqli: "-Tuning 9" + identification: "-Tuning b" diff --git a/tools/nikto/parser.py b/tools/nikto/parser.py new file mode 100644 index 0000000..0ad752d --- /dev/null +++ b/tools/nikto/parser.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python +import xml.etree.ElementTree as ET +import csv +from datetime import datetime +import re +import argparse +import os + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + + #Command line options + parser.add_argument("-f", "--file", help="File to process", required=True) + args = parser.parse_args() + + #Parse the XML file + tree = None + try: + #Open up the XML file from the nikto output + tree = ET.parse(args.file) + root = tree.getroot() + scan = root.find('scandetails') + datestring = datetime.strftime(datetime.now(), '%m/%d/%Y') + + #Find only the base filname, save as csv + base = os.path.basename(args.file) + + csv_output = open(os.path.join(os.path.dirname(args.file), "generic_" + os.path.splitext(base)[0] + ".csv"), 'w') + csvwriter = csv.writer(csv_output) + + """ + Date: :: + Date of the finding in mm/dd/yyyy format. 
+ Title: :: + Title of the finding + CweId: :: + Cwe identifier, must be an integer value. + Url: :: + Url associated with the finding. + Severity: :: + Severity of the finding. Must be one of Info, Low, Medium, High, or Critical. + Description: :: + Description of the finding. Can be multiple lines if enclosed in double quotes. + Mitigation: :: + Possible Mitigations for the finding. Can be multiple lines if enclosed in double quotes. + Impact: :: + Detailed impact of the finding. Can be multiple lines if enclosed in double quotes. + References: :: + References associated with the finding. Can be multiple lines if enclosed in double quotes. + Active: :: + Indicator if the finding is active. Must be empty, True or False + Verified: :: + Indicator if the finding has been verified. Must be empty, True, or False + FalsePositive: :: + Indicator if the finding is a false positive. Must be empty, True, or False + Duplicate: :: + Indicator if the finding is a duplicate. Must be empty, True, or False + """ + csvwriter.writerow(["Date","Title","CweId","Url","Severity","Description","Mitigation","Impact","References","Active","Verified","FalsePositive","Duplicate"]) + + for item in scan.findall('item'): + finding = [] + + #CSV format + + ####### Individual fields ######## + #Date + finding.append(datestring) + + #Title + titleText = None + description = item.find("description").text + #Cut the title down to the first sentence + sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', description) + if len(sentences) > 0: + titleText = sentences[0][:900] + else: + titleText = description[:900] + finding.append(titleText) + + #CweId + finding.append("0") + + #Url + ip = item.find("iplink").text + #Remove the port numbers for 80/443 + ip = ip.replace(":80","") + ip = ip.replace(":443","") + + finding.append(ip) + + #Severity + finding.append("Low") #Nikto doesn't assign severity, default to low + + #Description + finding.append(item.find("description").text) + + #Mitigation + finding.append("") + + #Impact + finding.append("") + + #References + finding.append("") + + #Active + finding.append("False") + + #Verified + finding.append("False") + + #FalsePositive + finding.append("False") + + #Duplicate + finding.append("False") + + csvwriter.writerow(finding) + + csv_output.close() + except: + print "Nothing in report" diff --git a/tools/nmap/config.yaml b/tools/nmap/config.yaml new file mode 100644 index 0000000..9821376 --- /dev/null +++ b/tools/nmap/config.yaml @@ -0,0 +1,58 @@ +nmap: + version: AppSecPipeline 0.5.0 + tags: + - "Dyanmic Scanner" + type: "dynamic" + scan_type: "infrastructure" + icon-sm: + icon-lg: + description: "Nmap is a free and open source (license) utility for network discovery and security auditing. Many systems and network administrators also find it useful for tasks such as network inventory, managing service upgrade schedules, and monitoring host or service uptime. Nmap uses raw IP packets in novel ways to determine what hosts are available on the network, what services (application name and version) those hosts are offering, what operating systems (and OS versions) they are running, what type of packet filters/firewalls are in use, and dozens of other characteristics. It was designed to rapidly scan large networks, but works fine against single hosts." 
+ url: https://nmap.org/ + documentation: https://nmap.org/book/man.html + docker: "appsecpipeline/base-tools" + parameters: + TARGET: + type: runtime + data_type: host + description: "Target hostname or IP address to scan." + commands: + pre: + exec: "nmap" + shell: False + report: "-oX {reportname}" + reportname: "{timestamp}.xml" + post: + junit: + credentials: + simple: + profiles: + #Full handshake, fairly fast + intensive_evident: "-sT -p- -A -T4 $TARGET" + #Scans all TCP ports + all: "-p 1-65535 -T4 -A -v $TARGET" + #Default everything. Issues a TCP SYN scan for the most common 1000 TCP ports, using ICMP echo request (ping) for host detection. + regular: "$TARGET" + #Scans the most common TCP ports and makes an effort to determine the OS type and the services and versions that are running. + intense: "-T4 -A -v $TARGET" + #Same as the intense scan but also scans UDP ports (-sU) + intense_udp: "-sS -sU -T4 -A -v $TARGET" + #Ping the target only, no port scan. + ping: "-sn $TARGET" + #Limits the scan to the top 100 most common TCP ports + quick: "-T4 -F $TARGET" + #Version and OS detection + quick_light: "-sV -T4 -O -F --version-light $TARGET" + #Determines hosts and routers in a network scan; traceroutes and pings all hosts defined in the target. + traceroute: "-sn --traceroute $TARGET" + #Intense scan plus UDP, highly intrusive and very slow + comprehensive: "-sS -sU -T4 -A -v -PE -PP -PS80,443 -PA3389 -PU40125 -PY -g 53 --script \"default or (discovery and safe)\" $TARGET" + #SYN scan + syn: "-sS $TARGET" + #UDP scan + udp: "-sU $TARGET" + #SCTP INIT scan. Like SYN scan, INIT scan is relatively unobtrusive and stealthy, since it never completes SCTP associations. + sctp: "-sY $TARGET" + #TCP Window scan: exactly the same as ACK scan, except that it exploits an implementation detail of certain systems to differentiate open ports from closed ones, rather than always printing unfiltered when an RST is returned. 
+ windows: "–sW $TARGET" + #Stealth scan + stealth: "-sS -p- -T2 $TARGET" diff --git a/tools/prepenv/.DS_Store b/tools/prepenv/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..8388a30497810f1fc634e43af8cbec0c4d795e60 GIT binary patch literal 8196 zcmZQzU|@7AO)+F(kYHe7;9!8z0^AH(0Z1N%F(jFwB5WY@7#IW?7>XE57>bk1iwlx+ zpmL+sXb6mkz-S1JhQMeDjE2DA3IRrlb2xC+`w-btax?@+LtsRP0H}OWfVAxy9H4Xq zga%15FfuTJy8w&~3@oq!Vg&aC7(j9$tsokt6-0xyGBAQzU^Bp485p5j8NuBUkUjyh zQH%^=?F@`yn?d4W?F@`yn;94wA=()jp*Ax@dnk+$?F@_%?F@`y+rdVT5~Cq78Ukn` z0P4H5GZZnTG88c6Blq9)lNcBnP9_!PWEPhg7+hy$VrF4wW9Q)H<lyAxhz-ujFApwB zEGaE^N-T;7^D<LXlc4O7{QMj^J25FNGqpTkK*TveuOu-uFSQ6sc}#d_UP^wsQ+{b) zN-@}+fW(rFq{JeyMeza>)zv2EhB^v{riQgT3e^@yCOQgchDNou930}RhPIvwxs_Gb zHMMm!A>M+O4h;MdKAfDzz`%g!&79&g=cF8vH<=1y-n@bECbyWIh`UF;fIw++YEWuU zVo7FMs!L)?YH_>(e{n{Bxo2{IUO-W5S!QZ^cyfMDX>ML|yntYGVoq{tPGU)_V@^(R zW>so&L`iBz35XR{oL^Mp3|1JCnVg>o5{nlQ0_pcJD9OyvD-KI7D$dN$<K$e;BPu1Y ztf6aUZsT}`mych7Q;1U}yezXgGbtxEv^+DVBqK7hI5{;hB{MHQDl;WDuOu@qGqs3Q z;3sEDUSe)4Sd*Bzm?VdQGDk#dQf6UkW=SPjlv74bj#FSOM@&j$Nvdyt3P@uLSVl}n zOo@Xd8YJ#ql$uzQ3K8a16;tOFNaP49&a8qM%&Enx!zobA8Iqlu2T>!Y&uPdZkir?7 zlbDp612N5n(~LvFn=`x&;wy-tC8srqAfpavL~?#^ZfaghF+|u-%z;DT1V<dmZlA>B z690nKJgD_zGGZ<q9FZUy$CQ-R6b@wv4M?miI~YJ10-(A>l0k>Tj=_x~iXoXHkD;8Q zhoP6Dk6{+WVusZW8yR*p9Ah}naFXE^!+C}q3^y5WGu&Z#%J7ciJ;O(aPYnMVIT$$^ zxfsP5<rx(il^B&7H5d&UjTlWBO&P5iof%yi-5A{&eHp_T!x<wPqZpGIvl(+3^BD^m zD;b*@n;Baf+Zd-Z&SRX<xPWmL<7&n&jC&z2V}e#z44x1^jAY!8H8~bQlVcdR<S6SV z;;u3v$?+x+C=Ch<35$rzDQoB$npr!*;}0#$P*N3gQUa$QNb&$D8(6Y{Bv&zU2}vnw zPTA1Ryp+@m^b`(D`2y^qq{DFqq*`7<R#pI%W*}S@Ras5}MuDvyoM2f^Rasd9CJu<4 zuAVHX0JA_MRL;nlQ&xagAcYemYi_~GDZnC7%n8wHW6LQkz$V}gm34ID<P=~K_{qtM zCEYP_Fknx03mH~1>|{8`aEakM!(E0)46hj8GyGuq$H>CS$0)`q!6?lr!zjzB#;C!l z#i-4w!)VHA&S=SK#b^ypX&#J$jKPed;PjTjn8cXEn97*On9o?qSj<?$Sjt$(*Z@v< zEsVX4{frYCCoxWDoWeL4lDgn&&I7`Sk&MeQ(_C3_QC?1dUOKov#=yYJP{NSUkk60< z8^Z+^j>D@CK=LA*=V9SAf@o%lUnM~Oe^&+uT;u-`Rior+2#kinunYl47MEZbCuk*r z&4-}AHmE<H0F?waYe3y`Mo_;Tq7S45)U^k9#~C4$ieS}@43M5YBLk!-&&a?4(mHSh b0i*Rlyr+j&1tL|BkU8AZ`X8Kxha~|3huKgX literal 0 HcmV?d00001 diff --git a/tools/prepenv/config.yaml b/tools/prepenv/config.yaml new file mode 100644 index 0000000..f732641 --- /dev/null +++ b/tools/prepenv/config.yaml @@ -0,0 +1,51 @@ +prepenv: + version: AppSecPipeline 0.5.0 + tags: + - "Utility" + type: "utility" + description: "AppSecPipeline Utility" + docker: "appsecpipeline/base" + url: https://github.com/OWASP/django-DefectDojo + documentation: http://defectdojo.readthedocs.io/en/latest/ + parameters: + DOJO_ENGAGEMENT_ID: + type: runtime + data_type: int + description: "Engagement id that exists in DefectDojo." + DOJO_DIR: + type: runtime + data_type: string + description: "Directory where the report reside in for importing into DefectDojo." + BUILD_ID: + type: runtime + data_type: string + description: "Build ID from upstream CI/CD." + DOJO_API_KEY: + type: config + data_type: key + description: "Dojo API key." + DOJO_HOST: + type: config + data_type: string + description: "DefectDojo host." + DOJO_PRODUCT_ID: + type: runtime + data_type: int + description: "DefectDojo product id." + DOJO_PROXY: + type: config + data_type: url + description: "Optional proxy for connecting to DefectDojo." 
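+ # Note: the "config" parameters above (DOJO_API_KEY, DOJO_HOST, DOJO_PROXY) are expected to come from the tool configuration handed to launch.py via --auth/--key, while "runtime" parameters are supplied per pipeline run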
+ commands: + pre: + exec: "python /usr/bin/appsecpipeline/tools/prepenv/prep_run.py --dir=$DOJO_DIR --api_key=$DOJO_API_KEY --host=$DOJO_HOST --product=$DOJO_PRODUCT_ID" + shell: False + post: + report: + reportname: + junit: + profiles: + all: "--build_id=$BUILD_ID" + close_engagement: "--engagement=$DOJO_ENGAGEMENT_ID --closeengagement" + engagement: "--engagement=$DOJO_ENGAGEMENT_ID" + all_proxy: "--proxy=$DOJO_PROXY --build_id=$BUILD_ID" diff --git a/tools/prepenv/prep_run.py b/tools/prepenv/prep_run.py new file mode 100644 index 0000000..9406b64 --- /dev/null +++ b/tools/prepenv/prep_run.py @@ -0,0 +1,343 @@ +""" +Example written by Aaron Weaver <aaron.weaver@owasp.org> +as part of the OWASP DefectDojo and OWASP AppSec Pipeline Security projects + +Description: CI/CD example for DefectDojo +""" +from defectdojo_api import defectdojo +from datetime import datetime, timedelta +import os, sys +import argparse +import time +import junit_xml_output +import shutil +import yaml + +DEBUG = True + +test_cases = [] + +def junit(toolName, file): + + junit_xml = junit_xml_output.JunitXml(toolName, test_cases, total_tests=None, total_failures=None) + with open(file, 'w') as file: + print "\nWriting Junit test file: junit_dojo.xml" + file.write(junit_xml.dump()) + +def dojo_connection(host, api_key, user, proxy=None): + + if proxy is not None: + proxies = { + 'http': 'http://' + proxy, + 'https': 'http://' + proxy, + } + print proxy + # Instantiate the DefectDojo api wrapper + dd = defectdojo.DefectDojoAPI(host, api_key, user, proxies=proxies, verify_ssl=False, timeout=360, debug=True) + else: + dd = defectdojo.DefectDojoAPI(host, api_key, user, verify_ssl=False, timeout=360, debug=False) + + return dd + +def return_engagement(dd, product_id, user, build_id=None): + engagement_id = None + #Specify the product id + product_id = product_id + user_id = None + start_date = datetime.now() + end_date = start_date+timedelta(days=1) + + users = dd.list_users(user) + + if users.success == False: + print "Error in listing users: " + users.message + print "Exiting...\n" + sys.exit() + else: + user_id = users.data["objects"][0]["id"] + + engagementText = "CI/CD Integration" + if build_id is not None: + engagementText = engagementText + " - Build #" + build_id + + engagement_id = dd.create_engagement(engagementText, product_id, str(user_id), + "In Progress", start_date.strftime("%Y-%m-%d"), end_date.strftime("%Y-%m-%d")) + + print "Engagement ID created: " + str(engagement_id) + + return str(engagement_id) + +def process_findings(dd, engagement_id, dir, build=None): + test_ids = [] + for root, dirs, files in os.walk(dir): + for name in files: + file = os.path.join(os.getcwd(),root, name) + if "processed" not in str(file) and "error" not in str(file): + #Test for file extension + if file.lower().endswith(('.json', '.csv','.txt','.js', '.xml')): + test_id = processFiles(dd, engagement_id, file) + + if test_id is not None: + if str(test_id).isdigit(): + test_ids.append(str(test_id)) + else: + print "Skipped file, extension not supported: " + file + "\n" + return ','.join(test_ids) + +def moveFile(file, success): + path = os.path.dirname(file) + name = os.path.basename(file) + dest = None + + #folder for processed files + processFolder = os.path.join(path,"processed") + if not os.path.exists(processFolder): + os.mkdir(processFolder) + + #folder for error file + errorFolder = os.path.join(path,"error") + if not os.path.exists(errorFolder): + os.mkdir(errorFolder) + + if success == True: + dest = 
os.path.join(path,processFolder,name) + else: + dest = os.path.join(path,errorFolder,name) + + shutil.move(file, dest) + +def processFiles(dd, engagement_id, file, scanner=None, build=None): + upload_scan = None + scannerName = None + path=os.path.dirname(file) + name = os.path.basename(file) + tool = os.path.basename(path) + tool = tool.lower() + + test_id = None + date = datetime.now() + dojoDate = date.strftime("%Y-%m-%d") + + #Tools without an importer in Dojo; attempted to import as generic + if "generic" in name: + scanner = "Generic Findings Import" + print "Uploading " + tool + " scan: " + file + test_id = dd.upload_scan(engagement_id, scanner, file, "true", dojoDate, build) + if test_id.success == False: + print "An error occured while uploading the scan: " + test_id.message + moveFile(file, False) + else: + print "Succesful upload, TestID: " + str(test_id) + "\n" + moveFile(file, True) + else: + if tool == "burp": + scannerName = "Burp Scan" + elif tool == "nessus": + scannerName = "Nessus Scan" + elif tool == "nmap": + scannerName = "Nmap Scan" + elif tool == "nexpose": + scannerName = "Nexpose Scan" + elif tool == "veracode": + scannerName = "Veracode Scan" + elif tool == "checkmarx": + scannerName = "Checkmarx Scan" + elif tool == "zap": + scannerName = "ZAP Scan" + elif tool == "appspider": + scannerName = "AppSpider Scan" + elif tool == "arachni": + scannerName = "Arachni Scan" + elif tool == "vcg": + scannerName = "VCG Scan" + elif tool == "dependency-check": + scannerName = "Dependency Check Scan" + elif tool == "retirejs": + scannerName = "Retire.js Scan" + elif tool == "nodesecurity": + scannerName = "Node Security Platform Scan" + elif tool == "qualys": + scannerName = "Qualys Scan" + elif tool == "qualyswebapp": + scannerName = "Qualys Webapp Scan" + elif tool == "openvas": + scannerName = "OpenVAS CSV" + elif tool == "snyk": + scannerName = "Snyk Scan" + else: + print "Tool not defined in dojo_ci_cd script: " + tool + + if scannerName is not None: + print "Uploading " + scannerName + " scan: " + file + test_id = dd.upload_scan(engagement_id, scannerName, file, "true", dojoDate, build) + if test_id.success == False: + print "An error occured while uploading the scan: " + test_id.message + moveFile(file, False) + else: + print "Succesful upload, TestID: " + str(test_id) + moveFile(file, True) + + return test_id + +def create_findings(dd, engagement_id, scanner, file, build=None): + # Upload the scanner export + if engagement_id > 0: + print "Uploading scanner data." 
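+ # dd.upload_scan returns a wrapped response from the defectdojo_api client; success, message and id() are checked below, as in processFiles above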
+ date = datetime.now() + + upload_scan = dd.upload_scan(engagement_id, scanner, file, "true", date.strftime("%Y-%m-%d"), build=build) + + if upload_scan.success: + test_id = upload_scan.id() + else: + print upload_scan.message + quit() + +def summary(dd, engagement_id, test_ids, max_critical=0, max_high=0, max_medium=0): + findings = dd.list_findings(engagement_id_in=engagement_id, duplicate="false", active="true", verified="true") + if findings.success: + print"==============================================" + print "Total Number of Vulnerabilities: " + str(findings.data["meta"]["total_count"]) + print"==============================================" + print_findings(sum_severity(findings)) + print + else: + print "An error occurred: " + findings.message + + findings = dd.list_findings(test_id_in=test_ids, duplicate="true") + + if findings.success: + print"==============================================" + print "Total Number of Duplicate Findings: " + str(findings.data["meta"]["total_count"]) + print"==============================================" + print_findings(sum_severity(findings)) + print + """ + #Delay while de-dupes + sys.stdout.write("Sleeping for 30 seconds to wait for dedupe celery process:") + sys.stdout.flush() + for i in range(15): + time.sleep(2) + sys.stdout.write(".") + sys.stdout.flush() + """ + else: + print "An error occurred: " + findings.message + + findings = dd.list_findings(test_id_in=test_ids, duplicate="false", limit=500) + + if findings.success: + if findings.count() > 0: + for finding in findings.data["objects"]: + test_cases.append(junit_xml_output.TestCase(finding["title"] + " Severity: " + finding["severity"], finding["description"],"failure")) + if not os.path.exists("reports"): + os.mkdir("reports") + junit("DefectDojo", "reports/junit_dojo.xml") + + print"\n==============================================" + print "Total Number of New Findings: " + str(findings.data["meta"]["total_count"]) + print"==============================================" + sum_new_findings = sum_severity(findings) + print_findings(sum_new_findings) + print + print"==============================================" + + strFail = None + if max_critical is not None: + if sum_new_findings[4] > max_critical: + strFail = "Build Failed: Max Critical" + if max_high is not None: + if sum_new_findings[3] > max_high: + strFail = strFail + " Max High" + if max_medium is not None: + if sum_new_findings[2] > max_medium: + strFail = strFail + " Max Medium" + if strFail is None: + print "Build Passed!" 
+ else: + print "Build Failed: " + strFail + print"==============================================" + else: + print "An error occurred: " + findings.message + +def sum_severity(findings): + severity = [0,0,0,0,0] + for finding in findings.data["objects"]: + if finding["severity"] == "Critical": + severity[4] = severity[4] + 1 + if finding["severity"] == "High": + severity[3] = severity[3] + 1 + if finding["severity"] == "Medium": + severity[2] = severity[2] + 1 + if finding["severity"] == "Low": + severity[1] = severity[1] + 1 + if finding["severity"] == "Info": + severity[0] = severity[0] + 1 + + return severity + +def print_findings(findings): + print "Critical: " + str(findings[4]) + print "High: " + str(findings[3]) + print "Medium: " + str(findings[2]) + print "Low: " + str(findings[1]) + print "Info: " + str(findings[0]) + +class Main: + if __name__ == "__main__": + parser = argparse.ArgumentParser(description='CI/CD integration for DefectDojo') + parser.add_argument('--host', help="Dojo Hostname", required=True) + parser.add_argument('--api_key', help="API Key: user:guidvalue", required=True) + parser.add_argument('--product', help="Dojo Product ID", required=True) + parser.add_argument('--file', help="Scanner file", required=False) + parser.add_argument('--dir', help="Scanner directory, needs to have the scanner name with the scan file in the folder. Ex: reports/nmap/nmap.csv", required=False, default="reports") + parser.add_argument('--scanner', help="Type of scanner", required=False) + parser.add_argument('--build_id', help="Build ID", required=False) + parser.add_argument('--engagement', help="Engagement ID (optional)", required=False) + parser.add_argument('--closeengagement', help="Close Engagement", required=False, action='store_true') + parser.add_argument('--critical', help="Maximum new critical vulns to pass the build.", required=False) + parser.add_argument('--high', help="Maximum new high vulns to pass the build.", required=False) + parser.add_argument('--medium', help="Maximum new medium vulns to pass the build.", required=False) + parser.add_argument('--proxy', help="Proxy, specify as host:port, ex: localhost:8080") + + #Parse arguments + args = vars(parser.parse_args()) + host = args["host"] + api_key = args["api_key"] + + product_id = args["product"] + file = args["file"] + dir = args["dir"] + scanner = args["scanner"] + engagement_id = args["engagement"] + closeEngagement = args["closeengagement"] + max_critical = args["critical"] + max_high = args["high"] + max_medium = args["medium"] + build_id = args["build_id"] + proxy = args["proxy"] + + if dir is not None: + if ":" not in api_key: + print "API Key not in the correct format, must be: <user>:<guid>" + quit() + apiParsed = api_key.split(':') + user = apiParsed[0] + api_key = apiParsed[1] + dd = dojo_connection(host, api_key, user, proxy) + engagement_id = return_engagement(dd, product_id, user, build_id=build_id) + + data = dict( + DOJO_ENGAGEMENT_ID = engagement_id + ) + + yamlLoc = os.path.join(os.path.join(dir,"prepenv"),"appruntime.yaml") + with open(yamlLoc, 'w') as outfile: + yaml.dump(data, outfile, default_flow_style=False) + + #Close the engagement + if closeEngagement == True: + dd.close_engagement(engagement_id) + + else: + print "No file or directory to scan specified." 
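+ + # Example invocation (hypothetical values; the api_key format is <user>:<guid>): + #   python prep_run.py --host dojo.example.com --api_key admin:aaaabbbb --product 1 --dir reports --build_id 42 + # Creates a CI/CD engagement in DefectDojo and writes the engagement id to <dir>/prepenv/appruntime.yaml for downstream tools.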
diff --git a/tools/retirejs/config.yaml b/tools/retirejs/config.yaml new file mode 100644 index 0000000..d3e9d52 --- /dev/null +++ b/tools/retirejs/config.yaml @@ -0,0 +1,28 @@ +retirejs: + version: AppSecPipeline 0.5.0 + tags: + - "Components with known Vulnerabilities" + type: "static" + description: "There is a plethora of JavaScript libraries for use on the Web and in Node.JS apps out there. This greatly simplifies development,but we need to stay up-to-date on security fixes. Using 'Components with Known Vulnerabilities' is now a part of the OWASP Top 10 list of security risks and insecure libraries can pose a huge risk to your Web app. The goal of Retire.js is to help you detect the use of JS-library versions with known vulnerabilities." + docker: "appsecpipeline/node" + url: https://retirejs.github.io/retire.js/ + documentation: https://github.com/RetireJS/retire.js + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." + commands: + pre: + exec: "retire" + shell: False + post: + report: "--outputpath {reportname} --outputformat json" + reportname: "{timestamp}.json" + junit: + languages: + - "javascript" + - "nodejs" + profiles: + #Runs the full dependency scan + all: "--path $LOC" diff --git a/tools/snyk/README.md b/tools/snyk/README.md new file mode 100644 index 0000000..a4233ad --- /dev/null +++ b/tools/snyk/README.md @@ -0,0 +1,12 @@ +snyk wizard + +Now redirecting you to our github auth page, go ahead and log in, +and once the auth is complete, return to this prompt and you’ll +be ready to start using snyk. + +If you can’t wait use this url: +https://snyk.io/login?token=<token> + +Waiting... + +https://snyk.io/account/ diff --git a/tools/snyk/config.yaml b/tools/snyk/config.yaml new file mode 100644 index 0000000..6b91f12 --- /dev/null +++ b/tools/snyk/config.yaml @@ -0,0 +1,33 @@ +snyk: + version: AppSecPipeline 0.5.0 + tags: + - "Static Code Analyzer" + type: "static" + description: "Snyk continuously monitors your application's dependencies and lets you quickly respond when new vulnerabilities are disclosed." + docker: "appsecpipeline/node" + url: https://snyk.io/ + documentation: https://snyk.io/docs/ + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." + SNYK_API_TOKEN: + type: config + data_type: api + description: "Synk API token" + commands: + pre: "snyk auth $SNYK_API_TOKEN" + exec: "snyk" + shell: True + post: + report: "--json > {reportname}" + reportname: "{timestamp}.json" + junit: + languages: + - "ruby" + - "nodejs" + - "java" + - "python" + profiles: + all: "test ." diff --git a/tools/spotbugs/config.yaml b/tools/spotbugs/config.yaml new file mode 100644 index 0000000..adee903 --- /dev/null +++ b/tools/spotbugs/config.yaml @@ -0,0 +1,28 @@ +spotbugs: + version: AppSecPipeline 0.5.0 + tags: + - "Static Code Analyzer" + type: "static" + description: "SpotBugs is a program which uses static analysis to look for bugs in Java code." + docker: "appsecpipeline/sast" + url: https://spotbugs.github.io/ + documentation: http://spotbugs-in-kengo-toda.readthedocs.io/en/latest/index.html + parameters: + LOC: + type: runtime + data_type: string + description: "Location of the source code." + commands: + parameters: "COMPILE_LOC : Location of jar file. 
LOC=/temp/jar" + pre: + exec: "spotbugs -textui" + shell: False + post: + report: "-xml -output {reportname} $COMPILE_LOC" + reportname: "{timestamp}.xml" + junit: + languages: + - "java" + profiles: + #Runs the full bandit scan + all: "-effort:max" diff --git a/tools/ssllabs/config.yaml b/tools/ssllabs/config.yaml new file mode 100644 index 0000000..9745c4f --- /dev/null +++ b/tools/ssllabs/config.yaml @@ -0,0 +1,31 @@ +ssllabs: + version: AppSecPipeline 0.5.0 + tags: + - "Dyanmic Scanner" + type: "dynamic" + scan_type: "server" + icon-sm: + icon-lg: + description: "This tool is a command-line client for the SSL Labs APIs, designed for automated and/or bulk testing." + url: https://github.com/ssllabs/ssllabs-scan + documentation: https://sourceforge.net/p/ssllabs/mailman/ssllabs-devel/ + docker: "appsecpipeline/base-tools" + parameters: + URL: + type: runtime + data_type: url + description: "URL of the site to scan." + commands: + pre: + exec: "ssllabs-scan" + shell: True + report: "> {reportname}" + reportname: "{timestamp}.json" + post: "python /usr/bin/appsecpipeline/tools/ssllabs/parser.py -f '{reportname}'" + junit: + credentials: + simple: + profiles: + all: "-usecache -verbosity=DEBUG $URL" + quick: " " + grade: "-usecache -grade $URL" diff --git a/tools/ssllabs/parser.py b/tools/ssllabs/parser.py new file mode 100644 index 0000000..8226b36 --- /dev/null +++ b/tools/ssllabs/parser.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python +import csv +from datetime import datetime +import json +import re +import argparse +import os + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + + #Command line options + parser.add_argument("-f", "--file", help="File to process", required=True) + args = parser.parse_args() + jsonFile = args.file + + #Criticality rating + #Grades: https://github.com/ssllabs/research/wiki/SSL-Server-Rating-Guide + #A - Info, B - Medium, C - High, D/F/M/T - Critical + def getCriticalityRating(rating): + criticality = "Info" + if "A" in rating: + criticality = "Info" + elif "B" in rating: + criticality = "Medium" + elif "C" in rating: + criticality = "High" + elif "D" in rating or "F" in rating or "M" in rating or "T" in rating: + criticality = "Critical" + + return criticality + + #Find only the base filname, save as csv + base = os.path.basename(args.file) + + csv_output = open(os.path.join(os.path.dirname(args.file), "generic_" + os.path.splitext(base)[0] + ".csv"), 'w') + csvwriter = csv.writer(csv_output) + + """ + Date: :: + Date of the finding in mm/dd/yyyy format. + Title: :: + Title of the finding + CweId: :: + Cwe identifier, must be an integer value. + Url: :: + Url associated with the finding. + Severity: :: + Severity of the finding. Must be one of Info, Low, Medium, High, or Critical. + Description: :: + Description of the finding. Can be multiple lines if enclosed in double quotes. + Mitigation: :: + Possible Mitigations for the finding. Can be multiple lines if enclosed in double quotes. + Impact: :: + Detailed impact of the finding. Can be multiple lines if enclosed in double quotes. + References: :: + References associated with the finding. Can be multiple lines if enclosed in double quotes. + Active: :: + Indicator if the finding is active. Must be empty, True or False + Verified: :: + Indicator if the finding has been verified. Must be empty, True, or False + FalsePositive: :: + Indicator if the finding is a false positive. Must be empty, True, or False + Duplicate: :: + Indicator if the finding is a duplicate. 
Must be empty, True, or False + """ + csvwriter.writerow(["Date","Title","CweId","Url","Severity","Description","Mitigation","Impact","References","Active","Verified","FalsePositive","Duplicate"]) + + datestring = datetime.strftime(datetime.now(), '%m/%d/%Y') + title = "SSLLabs Grade:" + with open(jsonFile) as json_data: + data = json.load(json_data) + for host in data: + for endpoints in host["endpoints"]: + + grade = endpoints["grade"] + host = host["host"] + title = "%s %s for %s " % (title, grade, host) + cert = endpoints["details"]["cert"] + description = "%s \n" % title + description = "%sCertifcate Subject: %s\n" % (description, cert["subject"]) + description = "%sIssuer Subject: %s\n" % (description, cert["issuerSubject"]) + description = "%sSignature Algorithm: %s\n" % (description, cert["sigAlg"]) + + cName = "" + for commonNames in cert["commonNames"]: + cName = "%s %s \n" % (cName, commonNames) + + aName = "" + for altNames in cert["altNames"]: + aName = "%s %s \n" % (aName, altNames) + + protoName = "" + for protocols in endpoints["details"]["protocols"]: + protoName = "%s %s %s\n" % (protoName, protocols["name"], protocols["version"]) + + description = "%s\nCommon Names:\n %s\nAlternate Names: \n%s\nProtocols: \n%s" % (description, cName, aName, protoName) + + finding = [] + + #CSV format + + ####### Individual fields ######## + #Date + finding.append(datestring) + + finding.append(title) + + #CweId + finding.append("0") + + finding.append(host) + + #Severity + finding.append(getCriticalityRating(grade)) #Nikto doesn't assign severity, default to low + + #Description + #finding.append('"' + description + '"') + finding.append(description) + + #Mitigation + finding.append("") + + #Impact + finding.append("") + + #References + finding.append("") + + #Active + finding.append("True") + + #Verified + finding.append("True") + + #FalsePositive + finding.append("False") + + #Duplicate + finding.append("False") + + csvwriter.writerow(finding) + + csv_output.close() diff --git a/tools/tenableio/.DS_Store b/tools/tenableio/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..0ec545915db503faebcca63b27bf2658180c7f3e GIT binary patch literal 6148 zcmZQzU|@7AO)+F(5MW?n;9!8z4DAe90Z1N%F(jFwA}k>DKxX8m8wMxm=N3T4iJ?Gx zSs2O~QW^3X%24#*c(2RAzyO!a&3AE0DJ@B6V7OMd{}0$`B)Jrvas?R(H!(6y0Bam2 zMnhmU1V%$(Gz3ONU<8K%BgF3<+z@({91Vfd5Eu;scnE;X2L(vmp1}c1H$Z5R6aymz z1Go#o2x_K7buoha0T4AHtspfZtsoktm4Ok&0?UK7GB7fLdH@UzjNonvhz4~@Kr~o8 z10&dG5Ff0afe~yo0|O&OI|Cy`I|C!Mhr$Ta&cF!K&cFz^9qPDIYBU5!LjWEE%n+6U zsQ!0lV8GS?ho~AQM?+vV1cqe@FtWG=yEuU>W$gX~)wQ7dGyzGP5mYBb^nj#6)jGH; nW`YbTO2CvdGBAL&g5<$fF(U&5xF#QM2*5&UlpYNM`iB4jFIOq! literal 0 HcmV?d00001 diff --git a/tools/tenableio/config.yaml b/tools/tenableio/config.yaml new file mode 100644 index 0000000..021a964 --- /dev/null +++ b/tools/tenableio/config.yaml @@ -0,0 +1,42 @@ +tenableio: + version: AppSecPipeline 0.5.0 + tags: + - "Infrastructure Scanner" + type: "infrastructure" + icon-sm: + icon-lg: + description: "Cloud-based Cyber Exposure platform for modern assets - from IT to cloud to IoT and OT." + url: https://www.tenable.com/products/tenable-io + documentation: https://docs.tenable.com/TenableIO.htm + docker: appsecpipeline/base-tools + parameters: + TARGET: + type: runtime + data_type: host + description: "Target hostname or ip address." + TENABLE_TEMPLATE: + type: runtime + data_type: string + description: "Tenable profile to run. 
(Defined in TenableIO)" + TENABLE_SCAN_NAME: + type: runtime + data_type: string + description: "Name of TenableIO scan." + TENABLE_ACCESS_KEY: + type: config + data_type: key + description: "Access key, generated in the TenableIO GUI." + TENABLE_SECRET_KEY: + type: config + data_type: key + description: "Secret key, generated in the TenableIO GUI." + commands: + pre: + exec: "python /usr/bin/appsecpipeline/tools/tenableio/tenablescan.py --target $TARGET --access_key $TENABLE_ACCESS_KEY --secret_key $TENABLE_SECRET_KEY" + shell: False + report: "--report {reportname}" + reportname: "{timestamp}.xml" + post: + junit: + profiles: + all: " " diff --git a/tools/tenableio/tenablescan.py b/tools/tenableio/tenablescan.py new file mode 100644 index 0000000..2bc0979 --- /dev/null +++ b/tools/tenableio/tenablescan.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python + +"""tenablescan.py: Scans a hosts on tenable's cloud platform""" + +__author__ = "Aaron Weaver" +__copyright__ = "Copyright 2018, Aaron Weaver" + +import os +import argparse +from datetime import datetime +from time import time + +from tenable_io.api.models import Scan +from tenable_io.api.scans import ScanExportRequest +from tenable_io.client import TenableIOClient +from tenable_io.exceptions import TenableIOApiException + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--target', help='Target IP', required=True) + parser.add_argument('--report', help='Name of report', required=True) + parser.add_argument('--access_key', help='Tenable API access key', required=True) + parser.add_argument('--secret_key', help='Tenable API secret key', required=True) + parser.add_argument('--scan_name', help='Scan Name', required=False, default="API Dynamic Scan") + parser.add_argument('--template', help='Template to utilize.', required=False, default="basic") + + args = parser.parse_args() + + client = TenableIOClient(access_key=args.access_key, secret_key=args.secret_key) + + scan = client.scan_helper.create( + name=args.scan_name, + text_targets=args.target, + template=args.template + ) + + nessus_file = args.report + + scan.launch().download(nessus_file, format=ScanExportRequest.FORMAT_NESSUS) + scan.delete() diff --git a/tools/wpscan/config.yaml b/tools/wpscan/config.yaml new file mode 100644 index 0000000..1eeba84 --- /dev/null +++ b/tools/wpscan/config.yaml @@ -0,0 +1,29 @@ +wpscan: + version: AppSecPipeline 0.5.0 + tags: + - "Dyanmic Scanner" + type: "dynamic" + scan_type: "web" + icon-sm: + icon-lg: + description: "WPScan is a black box WordPress vulnerability scanner." + url: https://wpscan.org + documentation: https://github.com/wpscanteam/wpscan + docker: "appsecpipeline/base-tools" + parameters: + URL: + type: runtime + data_type: url + description: "URL of the site to scan." 
+ commands: + pre: + exec: "wpscan --url $URL" + shell: True + report: "--format json --output {reportname}" + reportname: "{timestamp}.json" + post: "python /usr/bin/appsecpipeline/tools/wpscan/parser.py -f {reportname}" + junit: + profiles: + non_intrusive: "--detection-mode passive --random-user-agent" + plugins: "--enumerate p --random-user-agent" + all_enumeration: "--enumerate p --detection-mode mixed --random-user-agent" diff --git a/tools/wpscan/parser.py b/tools/wpscan/parser.py new file mode 100644 index 0000000..00f8d2d --- /dev/null +++ b/tools/wpscan/parser.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python +import csv +from datetime import datetime +import json +import re +import argparse +import os + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + + #Command line options + parser.add_argument("-f", "--file", help="File to process", required=True) + args = parser.parse_args() + jsonFile = args.file + + def findingHeaderInfo(version): + headerInfo = "" + + if "number" in version: + headerInfo += "Version: %s\n" % version["number"] + + if "confidence" in version: + headerInfo += "Confidence: %s\n" % version["confidence"] + + if "interesting_entries" in version: + if len(version["interesting_entries"]) > 0: + headerInfo += "Interesting Entries: \n" + for entries in version["interesting_entries"]: + headerInfo += "%s\n" % entries + + return headerInfo + + def findingInfo(findingType, csvwriter, host, headerInfo, findings): + datestring = datetime.strftime(datetime.now(), '%m/%d/%Y') + + for finding in findings: + csvFinding = [] + findingData = "" + refData = "" + title = "WPScan: " + severity = "Info" + + findingData += headerInfo + title += "(%s) - " % findingType + if findingType == "WP Finding": + severity = "Medium" + elif findingType == "Plugin": + severity = "Low" + + if "title" in finding: + title += "%s\n" % finding["title"] + if "XSS" in title: + severity = "High" + if "SQL" in title: + severity = "Critical" + else: + title += "%s\n" % finding["found_by"] + + findingData += "%s\n" % title + + if "fixed_in" in finding: + findingData += "Fixed In: %s\n" % finding["fixed_in"] + + if "url" in finding: + findingData += "URL: %s\n" % finding["url"] + + if "found_by" in finding: + findingData += "Found by: %s\n" % finding["found_by"] + + if "confidence" in finding: + findingData += "Confidence: %s\n" % finding["confidence"] + + if "interesting_entries" in finding: + if len(finding["interesting_entries"]) > 0: + findingData += "Interesting Entries: \n" + for entries in finding["interesting_entries"]: + findingData += "%s\n" % entries + + if "comfirmed_by" in finding: + if len(finding["confirmed_by"]) > 0: + findingData += "Confirmed By: \n" + for confirmed_by in finding["confirmed_by"]: + findingData += "%s\n" % confirmed_by + + if len(finding["references"]) > 0: + #refData += "References: \n" + for ref in finding["references"]: + refData += "%s:\n" % ref + for item in finding["references"][ref]: + refData += "%s\n" % item + + ####### Individual fields ######## + #Date + csvFinding.append(datestring) + + csvFinding.append(title) + + #CweId + csvFinding.append("0") + + csvFinding.append(host) + + #Severity + csvFinding.append(severity) #Nikto doesn't assign severity, default to low + + #Description + csvFinding.append(findingData) + + #Mitigation + csvFinding.append("") + + #Impact + csvFinding.append("") + + #References + csvFinding.append(refData) + + #Active + csvFinding.append("True") + + #Verified + csvFinding.append("True") + + #FalsePositive + 
csvFinding.append("False") + + #Duplicate + csvFinding.append("False") + + csvwriter.writerow(csvFinding) + + #Find only the base filname, save as csv + base = os.path.basename(args.file) + + csv_output = open(os.path.join(os.path.dirname(args.file), "generic_" + os.path.splitext(base)[0] + ".csv"), 'w') + csvwriter = csv.writer(csv_output) + + """ + Date: :: + Date of the finding in mm/dd/yyyy format. + Title: :: + Title of the finding + CweId: :: + Cwe identifier, must be an integer value. + Url: :: + Url associated with the finding. + Severity: :: + Severity of the finding. Must be one of Info, Low, Medium, High, or Critical. + Description: :: + Description of the finding. Can be multiple lines if enclosed in double quotes. + Mitigation: :: + Possible Mitigations for the finding. Can be multiple lines if enclosed in double quotes. + Impact: :: + Detailed impact of the finding. Can be multiple lines if enclosed in double quotes. + References: :: + References associated with the finding. Can be multiple lines if enclosed in double quotes. + Active: :: + Indicator if the finding is active. Must be empty, True or False + Verified: :: + Indicator if the finding has been verified. Must be empty, True, or False + FalsePositive: :: + Indicator if the finding is a false positive. Must be empty, True, or False + Duplicate: :: + Indicator if the finding is a duplicate. Must be empty, True, or False + """ + csvwriter.writerow(["Date","Title","CweId","Url","Severity","Description","Mitigation","Impact","References","Active","Verified","FalsePositive","Duplicate"]) + + with open(jsonFile) as json_data: + data = json.load(json_data) + finding = [] + endpoint = data["target_url"] + for item in data: + if item == "interesting_findings": + interesting_findings = data["interesting_findings"] + findingInfo("Interesting Finding", csvwriter,endpoint, "", data["interesting_findings"]) + + if data == "version": + findingInfo("WP Finding", csvwriter,endpoint,findingHeaderInfo(data["version"]), data["version"]["vulnerabilities"]) + + if item == "plugins": + plugins = data[item] + for plugin in plugins: + findingInfo("Plugin", csvwriter,endpoint, "", plugins[plugin]["vulnerabilities"]) + + csv_output.close() diff --git a/tools/zap/config.yaml b/tools/zap/config.yaml new file mode 100644 index 0000000..0bcf600 --- /dev/null +++ b/tools/zap/config.yaml @@ -0,0 +1,46 @@ +zap: + version: AppSecPipeline 0.5.0 + tags: + - "Dyanmic Scanner" + type: "dynamic" + scan_type: "web" + icon-sm: + icon-lg: + description: "The OWASP Zed Attack Proxy (ZAP) is one of the world’s most popular free security tools and is actively maintained by hundreds of international volunteers*. It can help you automatically find security vulnerabilities in your web applications while you are developing and testing your applications. Its also a great tool for experienced pentesters to use for manual security testing." + url: https://github.com/zaproxy/zaproxy + documentation: https://github.com/zaproxy/zaproxy/wiki + docker: "appsecpipeline/zap" + parameters: + URL: + type: runtime + data_type: url + description: "URL of host to scan." + LOGIN_URL: + type: runtime + data_type: url + description: "Login URL of host to scan." + LOGIN_PARMS: + type: runtime + data_type: string + description: "Login paramaters in the format of username=user&password=password." + LOGIN_SUCCESS: + type: runtime + data_type: string + description: "Succesful login text to match on." 
+ LOGIN_LOGOUT_PATTERN: + type: runtime + data_type: string + description: "Logout text to avoid." + commands: + pre: + exec: "python /zap/zap-baseline.py -t $URL" + shell: False + report: "-x {reportname}" + reportname: "{timestamp}.xml" + post: + junit: + credentials: + simple: + profiles: + all: "" + quick: " " diff --git a/tools/zap/zap-baseline.py b/tools/zap/zap-baseline.py new file mode 100644 index 0000000..3103658 --- /dev/null +++ b/tools/zap/zap-baseline.py @@ -0,0 +1,438 @@ +#!/usr/bin/env python +# Zed Attack Proxy (ZAP) and its related class files. +# +# ZAP is an HTTP/HTTPS proxy for assessing web application security. +# +# Copyright 2016 ZAP Development Team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script runs a baseline scan against a target URL using ZAP +# +# It can either be run 'standalone', in which case depends on +# https://pypi.python.org/pypi/python-owasp-zap-v2.4 and Docker, or it can be run +# inside one of the ZAP docker containers. It automatically detects if it is +# running in docker so the parameters are the same. +# +# By default it will spider the target URL for one minute, but you can change +# that via the -m parameter. +# It will then wait for the passive scanning to finish - how long that takes +# depends on the number of pages found. +# It will exit with codes of: +# 0: Success +# 1: At least 1 FAIL +# 2: At least one WARN and no FAILs +# 3: Any other failure +# By default all alerts found by ZAP will be treated as WARNings. +# You can use the -c or -u parameters to specify a configuration file to override +# this. +# You can generate a template configuration file using the -g parameter. You will +# then need to change 'WARN' to 'FAIL', 'INFO' or 'IGNORE' for the rules you want +# to be handled differently. +# You can also add your own messages for the rules by appending them after a tab +# at the end of each line. 
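+# Example (as wired up in tools/zap/config.yaml, which runs this script with "-t $URL" + # and the report option "-x {reportname}"): + #   zap-baseline.py -t https://www.example.com -x zap-report.xml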
+ +import getopt +import logging +import os +import os.path +import sys +import time +from datetime import datetime +from six.moves.urllib.request import urlopen +from zapv2 import ZAPv2 +from zap_common import * + + +config_dict = {} +config_msg = {} +out_of_scope_dict = {} +min_level = 0 + +# Pscan rules that aren't really relevant, eg the examples rules in the alpha set +blacklist = ['-1', '50003', '60000', '60001'] + +# Pscan rules that are being addressed +in_progress_issues = {} + +logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') +# Hide "Starting new HTTP connection" messages +logging.getLogger("requests").setLevel(logging.WARNING) + + +def usage(): + print ('Usage: zap-baseline.py -t <target> [options]') + print (' -t target target URL including the protocol, eg https://www.example.com') + print ('Options:') + print (' -c config_file config file to use to INFO, IGNORE or FAIL warnings') + print (' -u config_url URL of config file to use to INFO, IGNORE or FAIL warnings') + print (' -g gen_file generate default config file (all rules set to WARN)') + print (' -m mins the number of minutes to spider for (default 1)') + print (' -r report_html file to write the full ZAP HTML report') + print (' -w report_md file to write the full ZAP Wiki (Markdown) report') + print (' -x report_xml file to write the full ZAP XML report') + print (' -J report_json file to write the full ZAP JSON document') + print (' -a include the alpha passive scan rules as well') + print (' -d show debug messages') + print (' -P specify listen port') + print (' -D delay in seconds to wait for passive scanning ') + print (' -i default rules not in the config file to INFO') + print (' -j use the Ajax spider in addition to the traditional one') + print (' -l level minimum level to show: PASS, IGNORE, INFO, WARN or FAIL, use with -s to hide example URLs') + print (' -n context_file context file which will be loaded prior to spidering the target') + print (' -p progress_file progress file which specifies issues that are being addressed') + print (' -s short output format - dont show PASSes or example URLs') + print (' -T max time in minutes to wait for ZAP to start and the passive scan to run') + print (' -z zap_options ZAP command line options e.g. 
-z "-config aaa=bbb -config ccc=ddd"') + print ('') + print ('For more details see https://github.com/zaproxy/zaproxy/wiki/ZAP-Baseline-Scan') + + +def main(argv): + global min_level + global in_progress_issues + cid = '' + context_file = '' + progress_file = '' + config_file = '' + config_url = '' + generate = '' + mins = 1 + port = 0 + detailed_output = True + report_html = '' + report_md = '' + report_xml = '' + report_json = '' + target = '' + zap_alpha = False + info_unspecified = False + ajax = False + base_dir = '' + zap_ip = 'localhost' + zap_options = '' + delay = 0 + timeout = 0 + + pass_count = 0 + warn_count = 0 + fail_count = 0 + info_count = 0 + ignore_count = 0 + warn_inprog_count = 0 + fail_inprog_count = 0 + + check_zap_client_version() + + try: + opts, args = getopt.getopt(argv, "t:c:u:g:m:n:r:J:w:x:l:daijp:sz:P:D:T:") + except getopt.GetoptError as exc: + logging.warning('Invalid option ' + exc.opt + ' : ' + exc.msg) + usage() + sys.exit(3) + + for opt, arg in opts: + if opt == '-t': + target = arg + logging.debug('Target: ' + target) + elif opt == '-c': + config_file = arg + elif opt == '-u': + config_url = arg + elif opt == '-g': + generate = arg + elif opt == '-d': + logging.getLogger().setLevel(logging.DEBUG) + elif opt == '-m': + mins = int(arg) + elif opt == '-P': + port = int(arg) + elif opt == '-D': + delay = int(arg) + elif opt == '-n': + context_file = arg + elif opt == '-p': + progress_file = arg + elif opt == '-r': + report_html = arg + elif opt == '-J': + report_json = arg + elif opt == '-w': + report_md = arg + elif opt == '-x': + report_xml = arg + elif opt == '-a': + zap_alpha = True + elif opt == '-i': + info_unspecified = True + elif opt == '-j': + ajax = True + elif opt == '-l': + try: + min_level = zap_conf_lvls.index(arg) + except ValueError: + logging.warning('Level must be one of ' + str(zap_conf_lvls)) + usage() + sys.exit(3) + elif opt == '-z': + zap_options = arg + elif opt == '-s': + detailed_output = False + elif opt == '-T': + timeout = int(arg) + + # Check target supplied and ok + if len(target) == 0: + usage() + sys.exit(3) + + if not (target.startswith('http://') or target.startswith('https://')): + logging.warning('Target must start with \'http://\' or \'https://\'') + usage() + sys.exit(3) + + if running_in_docker(): + base_dir = '/opt/appsecpipeline/reports/' + if config_file or generate or report_html or report_xml or report_json or progress_file or context_file: + # Check directory has been mounted + if not os.path.exists(base_dir): + logging.warning('A file based option has been specified but the directory \'/opt/appsecpipeline/reports/\' is not mounted ') + usage() + sys.exit(3) + + # Choose a random 'ephemeral' port and check its available if it wasn't specified with -P option + if port == 0: + port = get_free_port() + + logging.debug('Using port: ' + str(port)) + + if config_file: + # load config file from filestore + with open(base_dir + config_file) as f: + try: + load_config(f, config_dict, config_msg, out_of_scope_dict) + except ValueError as e: + logging.warning(e) + sys.exit(3) + elif config_url: + # load config file from url + try: + load_config(urlopen(config_url).read().decode('UTF-8'), config_dict, config_msg, out_of_scope_dict) + except ValueError as e: + logging.warning(e) + sys.exit(3) + except: + logging.warning('Failed to read configs from ' + config_url) + sys.exit(3) + + if progress_file: + # load progress file from filestore + with open(base_dir + progress_file) as f: + progress = json.load(f) + # parse into 
something more useful... + # in_prog_issues = map of vulnid -> {object with everything in} + for issue in progress["issues"]: + if issue["state"] == "inprogress": + in_progress_issues[issue["id"]] = issue + + if running_in_docker(): + try: + params = [ + '-config', 'spider.maxDuration=' + str(mins), + '-addonupdate', + '-addoninstall', 'pscanrulesBeta'] # In case we're running in the stable container + + if zap_alpha: + params.append('-addoninstall') + params.append('pscanrulesAlpha') + + if zap_options: + for zap_opt in zap_options.split(" "): + params.append(zap_opt) + + start_zap(port, params) + + except OSError: + logging.warning('Failed to start ZAP :(') + sys.exit(3) + + else: + # Not running in docker, so start one + mount_dir = '' + if context_file: + mount_dir = os.path.dirname(os.path.abspath(context_file)) + + params = [ + '-config', 'spider.maxDuration=' + str(mins), + '-addonupdate'] + + if (zap_alpha): + params.extend(['-addoninstall', 'pscanrulesAlpha']) + + if zap_options: + for zap_opt in zap_options.split(" "): + params.append(zap_opt) + + try: + cid = start_docker_zap('owasp/zap2docker-weekly', port, params, mount_dir) + zap_ip = ipaddress_for_cid(cid) + logging.debug('Docker ZAP IP Addr: ' + zap_ip) + except OSError: + logging.warning('Failed to start ZAP in docker :(') + sys.exit(3) + + try: + zap = ZAPv2(proxies={'http': 'http://' + zap_ip + ':' + str(port), 'https': 'http://' + zap_ip + ':' + str(port)}) + + wait_for_zap_start(zap, timeout * 60) + + if context_file: + # handle the context file, cant use base_dir as it might not have been set up + res = zap.context.import_context('/zap/wrk/' + os.path.basename(context_file)) + if res.startswith("ZAP Error"): + logging.error('Failed to load context file ' + context_file + ' : ' + res) + + zap_access_target(zap, target) + + if target.count('/') > 2: + # The url can include a valid path, but always reset to spider the host + target = target[0:target.index('/', 8)+1] + + time.sleep(2) + + # Spider target + zap_spider(zap, target) + + if (ajax): + zap_ajax_spider(zap, target, mins) + + if (delay): + start_scan = datetime.now() + while ((datetime.now() - start_scan).seconds < delay): + time.sleep(5) + logging.debug('Delay passive scan check ' + str(delay - (datetime.now() - start_scan).seconds) + ' seconds') + + zap_wait_for_passive_scan(zap, timeout * 60) + + # Print out a count of the number of urls + num_urls = len(zap.core.urls) + if num_urls == 0: + logging.warning('No URLs found - is the target URL accessible? 
Local services may not be accessible from the Docker container') + else: + if detailed_output: + print('Total of ' + str(num_urls) + ' URLs') + + alert_dict = zap_get_alerts(zap, target, blacklist, out_of_scope_dict) + + all_rules = zap.pscan.scanners + all_dict = {} + for rule in all_rules: + plugin_id = rule.get('id') + if plugin_id in blacklist: + continue + all_dict[plugin_id] = rule.get('name') + + if generate: + # Create the config file + with open(base_dir + generate, 'w') as f: + f.write('# zap-baseline rule configuration file\n') + f.write('# Change WARN to IGNORE to ignore rule or FAIL to fail if rule matches\n') + f.write('# Only the rule identifiers are used - the names are just for info\n') + f.write('# You can add your own messages to each rule by appending them after a tab on each line.\n') + for key, rule in sorted(all_dict.items()): + f.write(key + '\tWARN\t(' + rule + ')\n') + + # print out the passing rules + pass_dict = {} + for rule in all_rules: + plugin_id = rule.get('id') + if plugin_id in blacklist: + continue + if (plugin_id not in alert_dict): + pass_dict[plugin_id] = rule.get('name') + + if min_level == zap_conf_lvls.index("PASS") and detailed_output: + for key, rule in sorted(pass_dict.items()): + print('PASS: ' + rule + ' [' + key + ']') + + pass_count = len(pass_dict) + + # print out the ignored rules + ignore_count, not_used = print_rules(alert_dict, 'IGNORE', config_dict, config_msg, min_level, + inc_ignore_rules, True, detailed_output, {}) + + # print out the info rules + info_count, not_used = print_rules(alert_dict, 'INFO', config_dict, config_msg, min_level, + inc_info_rules, info_unspecified, detailed_output, in_progress_issues) + + # print out the warning rules + warn_count, warn_inprog_count = print_rules(alert_dict, 'WARN', config_dict, config_msg, min_level, + inc_warn_rules, not info_unspecified, detailed_output, in_progress_issues) + + # print out the failing rules + fail_count, fail_inprog_count = print_rules(alert_dict, 'FAIL', config_dict, config_msg, min_level, + inc_fail_rules, True, detailed_output, in_progress_issues) + + if report_html: + # Save the report + write_report(base_dir + report_html, zap.core.htmlreport()) + + if report_json: + # Save the report + write_report(base_dir + report_json, zap._request_other(zap.base_other + 'core/other/jsonreport/')) + + if report_md: + # Save the report + write_report(base_dir + report_md, zap.core.mdreport()) + + if report_xml: + # Save the report + write_report(report_xml, zap.core.xmlreport()) + + print('FAIL-NEW: ' + str(fail_count) + '\tFAIL-INPROG: ' + str(fail_inprog_count) + + '\tWARN-NEW: ' + str(warn_count) + '\tWARN-INPROG: ' + str(warn_inprog_count) + + '\tINFO: ' + str(info_count) + '\tIGNORE: ' + str(ignore_count) + '\tPASS: ' + str(pass_count)) + + # Stop ZAP + zap.core.shutdown() + + except IOError as e: + if hasattr(e, 'args') and len(e.args) > 1: + errno, strerror = e.args + print("ERROR " + str(strerror)) + logging.warning('I/O error(' + str(errno) + '): ' + str(strerror)) + else: + print("ERROR %s" % e) + logging.warning('I/O error: ' + str(e)) + dump_log_file(cid) + + except: + print("ERROR " + str(sys.exc_info()[0])) + logging.warning('Unexpected error: ' + str(sys.exc_info()[0])) + dump_log_file(cid) + + if not running_in_docker(): + stop_docker(cid) + + if fail_count > 0: + sys.exit(1) + elif warn_count > 0: + sys.exit(2) + elif pass_count > 0: + sys.exit(0) + else: + sys.exit(3) + + +if __name__ == "__main__": + main(sys.argv[1:]) -- GitLab