<?php

declare(strict_types=1);

namespace Antlr\Antlr4\Runtime\Atn;

use Antlr\Antlr4\Runtime\Dfa\DFAState;
use Antlr\Antlr4\Runtime\PredictionContexts\PredictionContext;
use Antlr\Antlr4\Runtime\PredictionContexts\PredictionContextCache;

/**
 * Base class for the lexer and parser ATN simulators.
 *
 * Holds the ATN being simulated and a shared {@see PredictionContextCache}
 * that maps every PredictionContext which is equals() to a single cached
 * copy, shared across all contexts in all ATNConfigs in all DFA states.
 */
abstract class ATNSimulator
{
    /** @var ATN The augmented transition network this simulator runs over. */
    public $atn;

    /**
     * The context cache maps all PredictionContext objects that are equals()
     * to a single cached copy. This cache is shared across all contexts
     * in all ATNConfigs in all DFA states. Each ATNConfigSet is rebuilt
     * to use only cached nodes/graphs in addDFAState(); the cache is not
     * filled during closure(), since many contexts appear there that are
     * never used again and doing so greatly slows closure() down.
     *
     * This cache makes a huge difference in memory and a little bit in
     * speed: for the Java grammar on java.*, it dropped the memory
     * requirements at the end from 25M to 16M. None of the full context
     * graphs are stored in the DFA (they are limited to local context
     * only), but there is a lot of repetition there as well, so config
     * contexts are optimized before the config set is stored in the DFA
     * states by literally rebuilding them with cached subgraphs only.
     *
     * A per-adaptivePredict() closure cache was tried and discarded: it
     * cost a little extra time and did not reduce the overall footprint,
     * so it was not worth the complexity.
     *
     * @var PredictionContextCache
     */
    protected $sharedContextCache;

    /**
     * @param ATN                    $atn                The network to simulate.
     * @param PredictionContextCache $sharedContextCache Cache shared with sibling simulators.
     */
    public function __construct(ATN $atn, PredictionContextCache $sharedContextCache)
    {
        // Independent assignments; order is irrelevant.
        $this->sharedContextCache = $sharedContextCache;
        $this->atn = $atn;
    }

    /**
     * Lazily-built, process-wide sentinel DFA state signalling an error
     * (empty config set, max-int state number). Always returns the same
     * instance.
     */
    public static function error() : DFAState
    {
        static $error;

        if ($error === null) {
            $error = new DFAState(new ATNConfigSet(), 0x7FFFFFFF);
        }

        return $error;
    }

    abstract public function reset() : void;

    /**
     * Clear the DFA cache used by the current instance. Since the DFA cache
     * may be shared by multiple ATN simulators, this method may affect the
     * performance (but not accuracy) of other parsers which are being used
     * concurrently.
     *
     * @throws \InvalidArgumentException If the current instance does not
     *                                   support clearing the DFA.
     */
    public function clearDFA() : void
    {
        throw new \InvalidArgumentException('This ATN simulator does not support clearing the DFA.');
    }

    /**
     * Expose the shared context cache to collaborators.
     */
    public function getSharedContextCache() : PredictionContextCache
    {
        return $this->sharedContextCache;
    }

    /**
     * Return the cached, deduplicated equivalent of $context, or $context
     * itself when no shared cache is available.
     */
    public function getCachedContext(PredictionContext $context) : PredictionContext
    {
        // Defensive: the property is untyped at the engine level, so it can
        // still be null if a subclass never ran this base constructor.
        if ($this->sharedContextCache === null) {
            return $context;
        }

        $seen = [];

        return PredictionContext::getCachedPredictionContext($context, $this->sharedContextCache, $seen);
    }
}