/*
 * Copyright (c) 2012-2019 The ANTLR Project. All rights reserved.
 * Use of this file is governed by the BSD 3-clause license that
 * can be found in the LICENSE.txt file in the project root.
 */

module antlr.v4.runtime.atn.ATNSimulator;

import antlr.v4.runtime.UnsupportedOperationException;
import antlr.v4.runtime.atn.ATN;
import antlr.v4.runtime.atn.ATNDeserializer;
import antlr.v4.runtime.atn.ATNConfigSet;
import antlr.v4.runtime.atn.InterfaceATNSimulator;
import antlr.v4.runtime.atn.PredictionContext;
import antlr.v4.runtime.atn.PredictionContextCache;
import antlr.v4.runtime.dfa.DFAState;
import std.uuid;

/**
 * ATN simulator base class
 */
abstract class ATNSimulator : InterfaceATNSimulator
{

    public static int SERIALIZED_VERSION;

    /**
     * This is the current serialized UUID.
     *
     * @deprecated Use {@link ATNDeserializer#checkCondition(boolean)} instead.
     */
    public static UUID SERIALIZED_UUID;

    /**
     * Must distinguish between a missing edge and an edge we know leads nowhere.
     */
    public static DFAState ERROR;
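
    /*
     * A minimal sketch of how the sentinel is typically consulted when walking
     * DFA edges (illustrative only; the concrete simulators own the actual
     * edge arrays, and `s` and `t` here are hypothetical locals):
     *
     *     DFAState target = (s.edges is null) ? null : s.edges[t + 1];
     *     if (target is null)  { } // missing edge: target must still be computed
     *     if (target is ERROR) { } // known dead end: fail/recover, don't recompute
     */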

    public ATN atn;

    /**
     * The context cache maps all PredictionContext objects that are equals()
     * to a single cached copy. This cache is shared across all contexts
     * in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet
     * to use only cached nodes/graphs in addDFAState(). We don't want to
     * fill this during closure() since there are lots of contexts that
     * pop up but are never used again, and filling it also greatly slows
     * down closure().
     *
     * <p>This cache makes a huge difference in memory and a little bit in speed.
     * For the Java grammar on java.*, it dropped the memory requirements
     * at the end from 25M to 16M. We don't store any of the full context
     * graphs in the DFA because they are limited to local context only,
     * but apparently there's a lot of repetition there as well. We optimize
     * the config contexts before storing the config set in the DFA states
     * by literally rebuilding them with cached subgraphs only.</p>
     *
     * <p>I tried a cache for use during closure operations that was
     * whacked after each adaptivePredict(). It cost a little bit
     * more time, I think, and doesn't save on the overall footprint,
     * so it's not worth the complexity.</p>
     */
    public PredictionContextCache sharedContextCache;
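
    /*
     * A minimal usage sketch, assuming `sim` is a concrete simulator built
     * with a non-null shared cache and `ctxA`/`ctxB` are structurally equal
     * PredictionContext graphs constructed independently (names hypothetical):
     *
     *     PredictionContext a = sim.getCachedContext(ctxA);
     *     PredictionContext b = sim.getCachedContext(ctxB);
     *     assert(a is b); // equal graphs collapse to the single cached copy
     */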

    public static this()
    {
        SERIALIZED_VERSION = ATNDeserializer.SERIALIZED_VERSION;
        SERIALIZED_UUID = ATNDeserializer.SERIALIZED_UUID;
        // The error sentinel gets a state number no real DFA state can use.
        ERROR = new DFAState(new ATNConfigSet());
        ERROR.stateNumber = int.max;
    }

    public this(ATN atn, PredictionContextCache sharedContextCache)
    {
        this.atn = atn;
        this.sharedContextCache = sharedContextCache;
    }

    abstract public void reset();

    /**
     * Clear the DFA cache used by the current instance. Since the DFA cache may
     * be shared by multiple ATN simulators, this method may affect the
     * performance (but not accuracy) of other parsers which are being used
     * concurrently.
     *
     * @throws UnsupportedOperationException if the current instance does not
     * support clearing the DFA.
     */
    public void clearDFA()
    {
        throw new UnsupportedOperationException("This ATN simulator does not support clearing the DFA.");
    }
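
    /*
     * Concrete simulators that do support clearing typically override this
     * with something along these lines (a sketch modeled on the parser
     * simulator; `decisionToDFA` is that subclass's field, not ours):
     *
     *     override public void clearDFA()
     *     {
     *         foreach (d; 0 .. decisionToDFA.length)
     *             decisionToDFA[d] = new DFA(atn.getDecisionState(cast(int)d), cast(int)d);
     *     }
     */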

    public PredictionContextCache getSharedContextCache()
    {
        return sharedContextCache;
    }

    public PredictionContext getCachedContext(PredictionContext context)
    {
        // Without a shared cache there is nothing to deduplicate against.
        if (sharedContextCache is null)
            return context;
        // Memoizes already-visited nodes so shared subgraphs are only walked once.
        PredictionContext[PredictionContext] visited;
        return PredictionContext.getCachedContext(context,
                                                  sharedContextCache,
                                                  visited);
    }

}