/*
 * Copyright (c) 2012-2019 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

module antlr.v4.runtime.atn.ATNSimulator;

import antlr.v4.runtime.UnsupportedOperationException;
import antlr.v4.runtime.atn.ATN;
import antlr.v4.runtime.atn.ATNDeserializer;
import antlr.v4.runtime.atn.InterfaceATNSimulator;
import antlr.v4.runtime.atn.PredictionContext;
import antlr.v4.runtime.atn.PredictionContextCache;
import antlr.v4.runtime.dfa.DFAState;
import std.uuid;

/**
 * ATN simulator base class
 */
abstract class ATNSimulator : InterfaceATNSimulator
{

    public static int SERIALIZED_VERSION;

    /**
     * This is the current serialized UUID.
     * @deprecated Use {@link ATNDeserializer#checkCondition(boolean)} instead.
     */
    public static UUID SERIALIZED_UUID;

    /**
     * Must distinguish between a missing edge and an edge we know
     * leads nowhere.
     */
    public static DFAState ERROR;

    public ATN atn;

    /**
     * The context cache maps all PredictionContext objects that are equals()
     * to a single cached copy. This cache is shared across all contexts
     * in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet
     * to use only cached nodes/graphs in addDFAState(). We don't want to
     * fill this during closure() since there are lots of contexts that
     * pop up but are not used ever again. It also greatly slows down closure().
     *
     * <p>This cache makes a huge difference in memory and a little bit in speed.
     * For the Java grammar on java.*, it dropped the memory requirements
     * at the end from 25M to 16M. We don't store any of the full context
     * graphs in the DFA because they are limited to local context only,
     * but apparently there's a lot of repetition there as well. We optimize
     * the config contexts before storing the config set in the DFA states
     * by literally rebuilding them with cached subgraphs only.</p>
     *
     * <p>I tried a cache for use during closure operations that was
     * whacked after each adaptivePredict(). It cost a little bit more
     * time, I think, and doesn't save on the overall footprint, so it's
     * not worth the complexity.</p>
     */
    public PredictionContextCache sharedContextCache;

    public static this()
    {
        SERIALIZED_VERSION = ATNDeserializer.SERIALIZED_VERSION;
        SERIALIZED_UUID = ATNDeserializer.SERIALIZED_UUID;
    }

    public this(ATN atn, PredictionContextCache sharedContextCache)
    {
        this.atn = atn;
        this.sharedContextCache = sharedContextCache;
    }
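    /*
     * Illustrative sketch (not part of the runtime API): several simulators
     * can share one cache so that their prediction contexts are deduplicated
     * across all of them. The subclass names below are hypothetical, and the
     * no-argument PredictionContextCache constructor is assumed, as in the
     * Java runtime:
     *
     *     auto cache = new PredictionContextCache;
     *     auto lexerSim = new MyLexerSimulator(lexerAtn, cache);
     *     auto parserSim = new MyParserSimulator(parserAtn, cache);
     *     // both simulators now canonicalize contexts via getCachedContext()
     */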
    abstract public void reset();

    /**
     * Clear the DFA cache used by the current instance. Since the DFA cache may
     * be shared by multiple ATN simulators, this method may affect the
     * performance (but not accuracy) of other parsers which are being used
     * concurrently.
     *
     * @throws UnsupportedOperationException if the current instance does not
     * support clearing the DFA.
     */
    public void clearDFA()
    {
        throw new UnsupportedOperationException("This ATN simulator does not support clearing the DFA.");
    }

    public PredictionContextCache getSharedContextCache()
    {
        return sharedContextCache;
    }

    /**
     * Return a canonical copy of context built only from nodes already in
     * the shared cache; when no cache is configured, the context is
     * returned unchanged.
     */
    public PredictionContext getCachedContext(PredictionContext context)
    {
        if (sharedContextCache is null)
            return context;
        PredictionContext[PredictionContext] visited;
        return PredictionContext.getCachedContext(context,
                                                  sharedContextCache,
                                                  visited);
    }

}
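
/*
 * A minimal usage sketch: a concrete subclass only has to forward the
 * constructor arguments and override the abstract reset(). The
 * DummyATNSimulator below is purely illustrative (it is not an ANTLR
 * runtime class) and assumes InterfaceATNSimulator requires nothing
 * beyond what ATNSimulator already provides.
 */
version (unittest)
{
    private class DummyATNSimulator : ATNSimulator
    {
        public this(ATN atn, PredictionContextCache sharedContextCache)
        {
            super(atn, sharedContextCache);
        }

        public override void reset()
        {
            // nothing to reset in this illustrative simulator
        }
    }
}

unittest
{
    import std.exception : assertThrown;

    // Without a shared cache, getCachedContext() is a pass-through.
    auto sim = new DummyATNSimulator(null, null);
    assert(sim.getSharedContextCache() is null);
    assert(sim.getCachedContext(null) is null);

    // The base clearDFA() is unsupported until a subclass overrides it.
    assertThrown!UnsupportedOperationException(sim.clearDFA());
}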