wikimedia / mediawiki
This project does not seem to handle request data directly; as such, no vulnerable execution paths were found.
<?php
/**
 * Search index updater
 *
 * See deferred.txt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup Search
 */

use MediaWiki\MediaWikiServices;

/**
 * Database independent search index updater
 *
 * @ingroup Search
 */
class SearchUpdate implements DeferrableUpdate {
	/** @var int Page id being updated */
	private $id = 0;

	/** @var Title Title we're updating */
	private $title;

	/** @var Content|bool Content of the page (not text) */
	private $content;

	/** @var WikiPage */
	private $page;

	/**
	 * Constructor
	 *
	 * @param int $id Page id to update
	 * @param Title|string $title Title of page to update
	 * @param Content|string|bool $c Content of the page to update. Default: false.
	 *  If a Content object, the indexable text will be extracted from it.
	 *  A string is accepted for backwards compatibility. Passing false tells
	 *  the backend to update only the title, not the content.
	 */
	public function __construct( $id, $title, $c = false ) {
		if ( is_string( $title ) ) {
			$nt = Title::newFromText( $title );
		} else {
			$nt = $title;
		}

		if ( $nt ) {
			$this->id = $id;
			// is_string() check is back-compat for ApprovedRevs
			if ( is_string( $c ) ) {
				$this->content = new TextContent( $c );
			} else {
				$this->content = $c ?: false;
			}
			$this->title = $nt;
		} else {
			wfDebug( "SearchUpdate object created with invalid title '$title'\n" );
		}
	}

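	/*
	 * Illustrative sketch (not part of the original file): a SearchUpdate
	 * is normally queued as a deferred update rather than run inline,
	 * roughly along these lines ($wikiPage and $content are hypothetical
	 * surrounding variables):
	 *
	 *   DeferredUpdates::addUpdate(
	 *       new SearchUpdate( $wikiPage->getId(), $wikiPage->getTitle(), $content )
	 *   );
	 */
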
	/**
	 * Perform the actual update for the entry
	 */
	public function doUpdate() {
		$config = MediaWikiServices::getInstance()->getSearchEngineConfig();

		if ( $config->getConfig()->get( 'DisableSearchUpdate' ) || !$this->id ) {
			return;
		}

		$seFactory = MediaWikiServices::getInstance()->getSearchEngineFactory();
		foreach ( $config->getSearchTypes() as $type ) {
			$search = $seFactory->create( $type );
			if ( !$search->supports( 'search-update' ) ) {
				continue;
			}

			$normalTitle = $this->getNormalizedTitle( $search );

			if ( $this->getLatestPage() === null ) {
				$search->delete( $this->id, $normalTitle );
				continue;
			} elseif ( $this->content === false ) {
				$search->updateTitle( $this->id, $normalTitle );
				continue;
			}

			$text = $search->getTextFromContent( $this->title, $this->content );
			if ( !$search->textAlreadyUpdatedForIndex() ) {
				$text = $this->updateText( $text, $search );
			}

			# Perform the actual update
			$search->update( $this->id, $normalTitle, $search->normalizeText( $text ) );
		}
	}

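	/*
	 * Side note (illustration only, not part of the original file): the
	 * 'DisableSearchUpdate' check above reads the $wgDisableSearchUpdate
	 * setting. A common pattern is to disable inline index updates during
	 * bulk imports and rebuild the index afterwards:
	 *
	 *   // LocalSettings.php, while importing:
	 *   $wgDisableSearchUpdate = true;
	 *   // then rebuild, e.g.:
	 *   // php maintenance/updateSearchIndex.php
	 */
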
	/**
	 * Clean text for indexing. Only really suitable for indexing in databases.
	 * If you're using a real search engine, you'll probably want to override
	 * this behavior and do something nicer with the original wikitext.
	 * @param string $text
	 * @param SearchEngine|null $se Search engine
	 * @return string
	 */
	public function updateText( $text, SearchEngine $se = null ) {
		global $wgContLang;

		# Language-specific strip/conversion
		$text = $wgContLang->normalizeForSearch( $text );
		$se = $se ?: MediaWikiServices::getInstance()->newSearchEngine();
		$lc = $se->legalSearchChars() . '&#;';

		$text = preg_replace( "/<\\/?\\s*[A-Za-z][^>]*?>/",
			' ', $wgContLang->lc( " " . $text . " " ) ); # Strip HTML markup
		$text = preg_replace( "/(^|\\n)==\\s*([^\\n]+)\\s*==(\\s)/sD",
			"\\1\\2 \\2 \\2\\3", $text ); # Emphasize headings

		# Strip external URLs
		$uc = "A-Za-z0-9_\\/:.,~%\\-+&;#?!=()@\\x80-\\xFF";
		$protos = "http|https|ftp|mailto|news|gopher";
		$pat = "/(^|[^\\[])({$protos}):[{$uc}]+([^{$uc}]|$)/";
		$text = preg_replace( $pat, "\\1 \\3", $text );

		$p1 = "/([^\\[])\\[({$protos}):[{$uc}]+]/";
		$p2 = "/([^\\[])\\[({$protos}):[{$uc}]+\\s+([^\\]]+)]/";
		$text = preg_replace( $p1, "\\1 ", $text );
		$text = preg_replace( $p2, "\\1 \\3 ", $text );

		# Internal image links
		$pat2 = "/\\[\\[image:([{$uc}]+)\\.(gif|png|jpg|jpeg)([^{$uc}])/i";
		$text = preg_replace( $pat2, " \\1 \\3", $text );

		$text = preg_replace( "/([^{$lc}])([{$lc}]+)]]([a-z]+)/",
			"\\1\\2 \\2\\3", $text ); # Handle [[game]]s

		# Strip all remaining non-search characters
		$text = preg_replace( "/[^{$lc}]+/", " ", $text );

		/**
		 * Handle 's, s'
		 *
		 * $text = preg_replace( "/([{$lc}]+)'s /", "\\1 \\1's ", $text );
		 * $text = preg_replace( "/([{$lc}]+)s' /", "\\1s ", $text );
		 *
		 * These tail-anchored regexps are insanely slow. The worst case comes
		 * when Japanese or Chinese text (i.e. no word spacing) is written on
		 * a wiki configured for Western UTF-8 mode. The Unicode characters are
		 * expanded to hex codes and the "words" are very long paragraph-length
		 * monstrosities. On a large page the above regexps may take over 20
		 * seconds *each* on a 1GHz-level processor.
		 *
		 * Following are reversed versions which are consistently fast
		 * (about 3 milliseconds on a 1GHz-level processor).
		 */
		$text = strrev( preg_replace( "/ s'([{$lc}]+)/", " s'\\1 \\1", strrev( $text ) ) );
		$text = strrev( preg_replace( "/ 's([{$lc}]+)/", " s\\1", strrev( $text ) ) );

		# Strip wiki '' and '''
		$text = preg_replace( "/''[']*/", " ", $text );

		return $text;
	}

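	/*
	 * Worked example of the reversed-regex trick above (illustration only,
	 * not part of the original file; assumes the engine's legalSearchChars()
	 * includes the apostrophe). For the already lowercased input
	 * "the game's rules":
	 *
	 *   strrev( $text )          => "selur s'emag eht"
	 *   after the first replace  => "selur s'emag emag eht"
	 *   strrev() back            => "the game game's rules"
	 *
	 * The possessive is now indexed under both "game" and "game's", while
	 * both patterns stay head-anchored and therefore cheap to match.
	 */
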
	/**
	 * Get the WikiPage for the SearchUpdate $id using WikiPage::READ_LATEST,
	 * ensuring that the same WikiPage object is reused when there are
	 * multiple SearchEngine types.
	 *
	 * Returns null if the page has been deleted or is not found.
	 *
	 * @return WikiPage|null
	 */
	private function getLatestPage() {
		if ( !isset( $this->page ) ) {
			$this->page = WikiPage::newFromID( $this->id, WikiPage::READ_LATEST );
		}

		return $this->page;
	}

	/**
	 * Get a normalized string representation of a title suitable for
	 * including in a search index
	 *
	 * @param SearchEngine $search
	 * @return string A stripped-down title string ready for the search index
	 */
	private function getNormalizedTitle( SearchEngine $search ) {
		global $wgContLang;

		$ns = $this->title->getNamespace();
		$title = $this->title->getText();

		$lc = $search->legalSearchChars() . '&#;';
		$t = $wgContLang->normalizeForSearch( $title );
		$t = preg_replace( "/[^{$lc}]+/", ' ', $t );
		$t = $wgContLang->lc( $t );

		# Handle 's, s'
		$t = preg_replace( "/([{$lc}]+)'s( |$)/", "\\1 \\1's ", $t );
		$t = preg_replace( "/([{$lc}]+)s'( |$)/", "\\1s ", $t );

		$t = preg_replace( "/\\s+/", ' ', $t );

		if ( $ns == NS_FILE ) {
			$t = preg_replace( "/ (png|gif|jpg|jpeg|ogg)$/", "", $t );
		}

		return $search->normalizeText( trim( $t ) );
	}
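
	/*
	 * Worked example (illustration only, not part of the original file),
	 * assuming an English content language: the title "The King's Speech"
	 * becomes roughly "the king king's speech", so the page is findable
	 * both with and without the possessive.
	 */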
}
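
A minimal usage sketch, assuming a standard MediaWiki environment ('Example page' and the surrounding setup are hypothetical):

	// Queue a full index update; the deferred-update machinery calls
	// doUpdate() near the end of the request.
	$title = Title::newFromText( 'Example page' );
	$page = WikiPage::factory( $title );
	DeferredUpdates::addUpdate(
		new SearchUpdate( $page->getId(), $title, $page->getContent() )
	);

	// With the default third argument (false), only the title entry is
	// refreshed, not the page text:
	DeferredUpdates::addUpdate( new SearchUpdate( $page->getId(), $title ) );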