diff --git a/chipathlon/workflow.py b/chipathlon/workflow.py
index 2dab80b6b80cb7aec0e1c5266fe654b31b204382..5bc9e6442ec5a35b55b926bbaf8d49f949986941 100644
--- a/chipathlon/workflow.py
+++ b/chipathlon/workflow.py
@@ -199,82 +199,6 @@ class Workflow(object):
                 self.jobs[output_file] = download_job
         return
 
-    def _save_result(self, result_file_name, result_type, experiment_sample_ids, control_sample_ids, job_list, markers, ref_genome):
-        """
-            Saves a results file provided:
-            result_file_name -- the result file name in the master file list
-            result_type -- the type of result to insert
-            control_sample_ids -- A list of control sample ids
-            experiment_sample_ids -- A list of experiment sample ids
-            job_list -- The jobs that have been run on this file.
-            markers -- The markers that have been used for this file.
-            ref_genome -- A string that is the reference genome
-            name as defined in the run.yaml.
-
-            This function will generate the correct meta-data yaml
-            file & save job, and add them to the dax.
-        """
-        basename, ext = os.path.splitext(result_file_name)
-        meta_file_name = "%s_%s_meta.yaml" % (basename, ext[1:])
-        if meta_file_name not in self.files:
-            meta_file_path = os.path.join(self.basepath, "input/db_meta", meta_file_name)
-            # Use deepcopy to avoid altering the original markers object
-            meta_info = copy.deepcopy(markers)
-            meta_info["result_type"] = result_type
-            meta_info["control_sample_ids"] = control_sample_ids
-            meta_info["experiment_sample_ids"] = experiment_sample_ids
-            meta_info["ref_genome"] = ref_genome
-            for job_name in job_list:
-                job_arg_dict = self.workflow_jobs[job_name].get_db_arguments()
-                meta_info[job_name] = job_arg_dict
-            with open(meta_file_path, "w") as wh:
-                yaml.dump(meta_info, wh, default_flow_style=False)
-            self._add_file(meta_file_name, meta_file_path, "local")
-            job_inputs = [
-                {"name": "username", "type": "string", "value": self.username},
-                {"name": "password", "type": "string", "value": self.password},
-                {"name": "host", "type": "string", "value": self.host},
-                {"name": result_file_name, "type": "file", "file": self.files[result_file_name]},
-                {"name": meta_file_name, "type": "file", "file": self.files[meta_file_name]}
-            ]
-            save_job = self.workflow_jobs["db_save_result"].create_job(job_inputs, [], [])
-            self.dax.addJob(save_job)
-        return
-
-    def _save_results(self, outputs, ref_genome):
-        for attributes in outputs.values():
-            if attributes.get("save_result"):
-                self._save_result(
-                    attributes["file_name"],
-                    attributes["result_type"],
-                    attributes["experiment_sample_ids"],
-                    attributes["control_sample_ids"],
-                    attributes["all_jobs"],
-                    attributes["all_markers"],
-                    ref_genome
-                )
-        return
-
-    def _get_existing_results(self, outputs, ref_genome, output_names):
-        result_list = []
-        for (name, attributes) in outputs.items():
-            if name in output_names and attributes.get("save_result"):
-                # Use deepcopy to avoid altering the original markers object
-                params = copy.deepcopy(attributes["all_markers"])
-                for job_name in attributes["all_jobs"]:
-                    params[job_name] = self.workflow_jobs[job_name].get_db_arguments()
-                result_list.append(
-                    self.mdb.check_result(
-                        attributes["file_name"],
-                        attributes["control_sample_ids"],
-                        attributes["experiment_sample_ids"],
-                        ref_genome,
-                        attributes["result_type"],
-                        params
-                    )
-                )
-        return result_list
-
     def _remove_unused_jobs(self, remove_jobs, required_jobs):
         for job_name in remove_jobs:
             if job_name not in required_jobs: