/*
 * Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

module antlr.v4.runtime.atn.ATNSimulator;

import std.uuid;
import antlr.v4.runtime.UnsupportedOperationException;
import antlr.v4.runtime.atn.ATN;
import antlr.v4.runtime.atn.ATNDeserializer;
import antlr.v4.runtime.atn.InterfaceATNSimulator;
import antlr.v4.runtime.dfa.DFAState;
import antlr.v4.runtime.atn.PredictionContextCache;
import antlr.v4.runtime.atn.PredictionContext;

// Class ATNSimulator
/**
 * The base class for ATN simulators. It holds the ATN being simulated
 * and the prediction context cache shared across simulators.
 */
abstract class ATNSimulator : InterfaceATNSimulator
{

    public static int SERIALIZED_VERSION;

    /**
     * @uml
     * This is the current serialized UUID.
     * deprecated Use {@link ATNDeserializer#checkCondition(boolean)} instead.
     */
    public static UUID SERIALIZED_UUID;

    /**
     * @uml
     * Must distinguish between a missing edge and an edge we know leads nowhere
     */
    public static DFAState ERROR;

    public ATN atn;

    /**
     * The context cache maps all PredictionContext objects that are equals()
     * to a single cached copy. This cache is shared across all contexts
     * in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet
     * to use only cached nodes/graphs in addDFAState(). We don't want to
     * fill this cache during closure(), since many contexts pop up there
     * but are never used again; filling it there also greatly slows down
     * closure().
     *
     * <p>This cache makes a huge difference in memory and a little bit in speed.
     * For the Java grammar on java.*, it dropped the memory requirements
     * at the end from 25M to 16M. We don't store any of the full-context
     * graphs in the DFA because they are limited to local context only,
     * but apparently there's a lot of repetition there as well. We optimize
     * the config contexts before storing the config set in the DFA states
     * by literally rebuilding them with cached subgraphs only.</p>
     *
     * <p>I tried a cache for use during closure operations that was
     * cleared after each adaptivePredict(). It cost a little more time,
     * I think, and didn't reduce the overall footprint, so it's not
     * worth the complexity.</p>
     */
    public PredictionContextCache sharedContextCache;

    public static this()
    {
        SERIALIZED_VERSION = ATNDeserializer.SERIALIZED_VERSION;
        SERIALIZED_UUID = ATNDeserializer.SERIALIZED_UUID;
    }

    public this(ATN atn, PredictionContextCache sharedContextCache)
    {
        this.atn = atn;
        this.sharedContextCache = sharedContextCache;
    }

    abstract public void reset();

    /**
     * @uml
     * Clear the DFA cache used by the current instance. Since the DFA cache may
     * be shared by multiple ATN simulators, this method may affect the
     * performance (but not accuracy) of other parsers which are being used
     * concurrently.
     *
     * @throws UnsupportedOperationException if the current instance does not
     * support clearing the DFA.
     */
    public void clearDFA()
    {
        throw new UnsupportedOperationException("This ATN simulator does not support clearing the DFA.");
    }

    public PredictionContextCache getSharedContextCache()
    {
        return sharedContextCache;
    }

    /**
     * @uml
     * Return the canonical cached copy of the given context graph, rebuilt
     * from cached subgraphs; returns the context unchanged when no shared
     * cache is configured.
     */
    public PredictionContext getCachedContext(PredictionContext context)
    {
        if (sharedContextCache is null)
            return context;
        PredictionContext[PredictionContext] visited;
        return PredictionContext.getCachedContext(context,
                                                  sharedContextCache,
                                                  visited);
    }

    /**
     * @uml
     * deprecated Use {@link ATNDeserializer#deserialize} instead.
     */
    public ATN deserialize(wstring data)
    {
        return new ATNDeserializer().deserialize(data);
    }

}
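
// A minimal sketch of how the members above fit together, assuming only what
// is declared in this module. The TestSimulator subclass and the use of a
// default PredictionContextCache constructor are illustrative assumptions,
// not part of the runtime API.
unittest
{
    class TestSimulator : ATNSimulator
    {
        public this(ATN atn, PredictionContextCache sharedContextCache)
        {
            super(atn, sharedContextCache);
        }

        public override void reset()
        {
        }
    }

    // Without a shared cache, getCachedContext passes contexts through
    // unchanged.
    auto uncached = new TestSimulator(null, null);
    assert(uncached.getCachedContext(null) is null);

    // The base-class clearDFA() refuses to clear; simulators that own a
    // DFA cache are expected to override it.
    auto sim = new TestSimulator(null, new PredictionContextCache());
    try
    {
        sim.clearDFA();
        assert(false, "expected UnsupportedOperationException");
    }
    catch (UnsupportedOperationException)
    {
    }
}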