diff --git a/2021/README.md b/2021/README.md
index c31634ce6cf4d0014af6e91e8231a8008ae30b33..300ba54c41b58f686369fa093041f22cf4f57a31 100644
--- a/2021/README.md
+++ b/2021/README.md
@@ -159,9 +159,37 @@ Hmm... part 1 took longer than I think it should have. The big-O complexity is
manageable, but I guess the constant factors are problematic.
For part 2, I'm not even going to try to algorithmically determine which letters
-are created (and not just because I'd have to return soemthing other than a
+are created (and not just because I'd have to return something other than a
long integer).
Hmm... part 2 really didn't take much longer than part 1 did, which suggests
the computational pain point is in parsing the data and creating the "paper"
and not in the folding.
+
+# Day 14
+
+Again, no fancy data structures: just a Map to hold the insertion rules and a
+way to iterate over the string.
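+
+Something like this is all the bookkeeping the rules need (a minimal sketch of
+the idea; the real version is `parseData` in `Day14.java`):
+
+```java
+// inputLines is a stand-in for however the puzzle input is delivered,
+// one line per entry, e.g. "CH -> B".
+Map<String, Character> rules = new HashMap<>();
+for (String line : inputLines) {
+    if (line.contains(" -> ")) {
+        String[] halves = line.split(" -> ");
+        rules.put(halves[0], halves[1].charAt(0));
+    }
+}
+```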
+
+The only interesting piece of part 2 is that it tries to trip you up with
+integer overflow. Since I already converted my abstract base class to long
+integers back on Day 6, that's no big deal. That, and waiting a bit longer for
+the answer. Quite a bit longer, it seems. Is there any place I can optimize? I
+sure hope so, since I just hit an OutOfMemoryError on the *sample data*!
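+
+For scale: each step inserts an element between every adjacent pair, so the
+polymer roughly doubles in length each time. After 40 steps even the 4-element
+sample grows to 3 * 2^40 + 1 (about 3.3 trillion) elements, far past
+Integer.MAX_VALUE, so long counts (and, it turns out, a great deal of memory)
+are unavoidable.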
+
+I don't see how I can avoid modeling the full string.
+
+Maybe I can have the StringBuilder work on smaller chunks, thereby allowing for
+more frequent garbage collection? That might work, assuming StringBuilder keeps
+its characters in something like a linked list of chunks rather than a single
+flat array.
+
+That doesn't seem to be speeding things up, but hopefully it'll help with the
+memory problem. (Two hours later...) Memory didn't blow up, but the sample data
+still isn't finished processing. It occurs to me that I don't need a
+StringBuilder at all: I can create two arrays of characters and merge them.
+Let's see how that goes.
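+
+For example, one step of the sample turns `NNCB` (4 elements) into `NCNBCHB`
+(7 elements): the original elements land in the even indices of the new array,
+and the inserted elements (`NN -> C`, `NC -> B`, `CB -> H`) land in the odd
+indices.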
+
+Oh, yes. This is *much* faster. It's still taking a while to get through part 2,
+but in a few seconds we got past the point that the StringBuilder approach took
+a couple of hours to reach while I was in a meeting. But we still ran out of
+memory when growing the new polymer on iteration 28 of the sample data.
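+
+A back-of-envelope check agrees: by step 28 the sample polymer is already
+3 * 2^28 + 1 (about 800 million) characters, roughly 1.6 GB as a `char[]`,
+before the next, twice-as-large array is even allocated.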
diff --git a/2021/src/main/java/edu/unl/cse/bohn/year2021/Day14.java b/2021/src/main/java/edu/unl/cse/bohn/year2021/Day14.java
new file mode 100644
index 0000000000000000000000000000000000000000..cabe3f2a9ebaec051987756bcbf1fa3322aa017c
--- /dev/null
+++ b/2021/src/main/java/edu/unl/cse/bohn/year2021/Day14.java
@@ -0,0 +1,122 @@
+package edu.unl.cse.bohn.year2021;
+
+import edu.unl.cse.bohn.Puzzle;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+@SuppressWarnings("unused")
+public class Day14 extends Puzzle {
+ private Map<String, Character> rules;
+
+ public Day14(boolean isProductionReady) {
+ super(isProductionReady);
+ sampleData = """
+ NNCB
+
+ CH -> B
+ HH -> N
+ CB -> H
+ NH -> C
+ HB -> C
+ HC -> B
+ HN -> C
+ NN -> C
+ BH -> H
+ NC -> B
+ NB -> B
+ BN -> B
+ BB -> N
+ BC -> B
+ CC -> N
+ CN -> C""";
+ }
+
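+    // The first line of the input is the polymer template; every subsequent
+    // "XY -> Z" line is a pair-insertion rule.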
+ private String parseData(List<String> data) {
+ String initialMolecule = data.get(0);
+ rules = new HashMap<>();
+ for (String rule : data) {
+ if (rule.contains(" -> ")) {
+ String[] halves = rule.split(" -> ");
+ rules.put(halves[0], halves[1].charAt(0));
+ }
+ }
+ return initialMolecule;
+ }
+
+ @SuppressWarnings("CommentedOutCode")
+ private String growMolecule(String molecule) {
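+        // First attempt, kept for reference: grow the molecule with a StringBuilder
+        // (see the Day 14 notes in the README).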
+// StringBuilder growingMolecule = new StringBuilder();
+// String pattern = "";
+// for (int i = 0; i < molecule.length() - 1; i++) {
+// pattern = molecule.substring(i, i + 2);
+// growingMolecule.append(pattern.charAt(0)).append(rules.get(pattern));
+// }
+// growingMolecule.append(pattern.charAt(1));
+// return growingMolecule.toString();
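+        // Second attempt: interleave into a char array of length 2n - 1, with the
+        // old elements at even indices and the rule-inserted elements at odd indices.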
+ char[] oldElements = molecule.toCharArray();
+ char[] newElements = new char[2 * molecule.length() - 1];
+ for (int i = 0; i < oldElements.length - 1; i++) {
+ newElements[2 * i] = oldElements[i];
+ newElements[2 * i + 1] = rules.get(molecule.substring(i, i + 2));
+ }
+ newElements[newElements.length - 1] = oldElements[oldElements.length - 1];
+ return new String(newElements);
+ }
+
+    private Map<Character, Long> countElements(String molecule) {
+        Map<Character, Long> counts = new HashMap<>();
+        for (char element : molecule.toCharArray()) {
+            counts.merge(element, 1L, Long::sum);
+        }
+        return counts;
+    }
+
+ private long producePolymer(List<String> data, int numberOfSteps) {
+ String molecule = parseData(data);
+ for (int i = 0; i < numberOfSteps; i++) {
+ System.out.print("Molecular growth (" + i + ")--\toriginal size: " + molecule.length());
+ molecule = growMolecule(molecule);
+ System.out.println("\tnew size: " + molecule.length());
+// if (molecule.length() < 60) {
+// String expectedString = switch (i) {
+// case 0 -> "NCNBCHB";
+// case 1 -> "NBCCNBBBCBHCB";
+// case 2 -> "NBBBCNCCNBBNBNBBCHBHHBCHB";
+// case 3 -> "NBBNBNBBCCNBCNCCNBBNBBNBBBNBBNBBCBHCBHHNHCBBCBHCB";
+// default -> "??";
+// };
+// System.out.println("\texpected: " + expectedString);
+// System.out.println("\t actual: " + molecule);
+// assert (molecule.equals(expectedString));
+// }
+ }
+ Map<Character, Long> elementCounts = countElements(molecule);
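+        // The puzzle answer is the count of the most common element minus the count
+        // of the least common element.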
+ char leastFrequentElement = molecule.charAt(0);
+ char mostFrequentElement = molecule.charAt(0);
+ for (char element : elementCounts.keySet()) {
+ if (elementCounts.get(element) < elementCounts.get(leastFrequentElement)) {
+ leastFrequentElement = element;
+ }
+ if (elementCounts.get(element) > elementCounts.get(mostFrequentElement)) {
+ mostFrequentElement = element;
+ }
+ }
+ return elementCounts.get(mostFrequentElement) - elementCounts.get(leastFrequentElement);
+ }
+
+ @Override
+ public long computePart1(List<String> data) {
+ return producePolymer(data, 10);
+ }
+
+ @Override
+ public long computePart2(List<String> data) {
+ return producePolymer(data, 40);
+ }
+}