Completed: Branch rebuild (e7a2d2), created by Glenn at 08:26

Function: LoadTargetBuildFile()   Grade: F

Complexity: Conditions 17
Size: Total Lines 114
Duplication: Lines 0, Ratio 0%
Importance: Changes 0
Metric   Value
dl       0
loc      114
c        0
b        0
f        0
rs       2
cc       17

(loc = lines of code, matching Total Lines above; cc = cyclomatic complexity, matching the 17 conditions above.)

How to fix: Long Method, Complexity

Long Method

Small methods make your code easier to understand, particularly when combined with a good name. And when a method is small, finding a good name for it is usually much easier.

For example, if you find yourself adding comments inside a method's body, that is usually a good sign that the commented part should be extracted into a new method, with the comment serving as a starting point for the new method's name.

Commonly applied refactorings include Extract Method, Replace Temp with Query, Replace Method with Method Object, and Decompose Conditional.
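
As an illustration of Extract Method on this very function: the commented DEPTH block at the top of LoadTargetBuildFile() (visible in the listing below) could move into a helper whose name replaces the comment. This is a minimal sketch under that assumption; the helper name _PredefineDepthVariable is hypothetical, not part of the codebase:

def _PredefineDepthVariable(variables, build_file_path, depth):
  # Predefine the DEPTH variable to be a relative path from this build
  # file's directory to the directory identified by depth (the comment
  # that used to label the inline block becomes the method name).
  d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
  if d == '':
    variables['DEPTH'] = '.'
  else:
    variables['DEPTH'] = d.replace('\\', '/')

def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  if depth:
    _PredefineDepthVariable(variables, build_file_path, depth)
  # ... rest of the method unchanged ...

Each such extraction shifts conditions out of the method, reducing the complexity counted against it.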

Complexity

Complex methods like LoadTargetBuildFile() often do a lot of different things. To break such a method down, we need to identify a cohesive component within it. A common approach to finding such a component is to look for fields or methods that share the same prefixes or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often the faster option.
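
Because LoadTargetBuildFile() is a long function rather than a class, the class-level advice translates to extracting cohesive statement blocks into helpers. A minimal sketch using the target_defaults merge that appears later in this file (the helper name _MergeTargetDefaults is hypothetical):

def _MergeTargetDefaults(build_file_data, build_file_path):
  # Give each target the impression that target_defaults is its base dict,
  # exactly as the inline while-loop in LoadTargetBuildFile() does today.
  if 'targets' not in build_file_data:
    raise GypError("Unable to find targets in build file %s" %
                   build_file_path)
  for index, old_target_dict in enumerate(build_file_data['targets']):
    new_target_dict = gyp.simple_copy.deepcopy(
        build_file_data['target_defaults'])
    MergeDicts(new_target_dict, old_target_dict,
               build_file_path, build_file_path)
    build_file_data['targets'][index] = new_target_dict
  del build_file_data['target_defaults']

The call site in LoadTargetBuildFile() then shrinks to a single guarded call:

if 'target_defaults' in build_file_data:
  _MergeTargetDefaults(build_file_data, build_file_path)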

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet


# A list of types that are treated as linkable.
linkable_types = [
  'executable',
  'shared_library',
  'loadable_module',
  'mac_kernel_extension',
]

# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']

# base_path_sections is a list of sections defined by GYP that contain
# pathnames.  The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
path_sections = set()

# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}

def IsPathSection(section):
  # If section ends in one of the '=+?!' characters, it's applied to a section
  # without the trailing characters.  '/' is notably absent from this list,
  # because there's no way for a regular expression to be treated as a path.
  while section and section[-1:] in '=+?!':
    section = section[:-1]

  if section in path_sections:
    return True

  # Sections matching the regexp '_(dir|file|path)s?$' are also
  # considered PathSections. Using manual string matching since that
  # is much faster than the regexp and this can be called hundreds of
  # thousands of times so micro performance matters.
  if "_" in section:
    tail = section[-6:]
    if tail[-1] == 's':
      tail = tail[:-1]
    if tail[-5:] in ('_file', '_path'):
      return True
    return tail[-4:] == '_dir'

  return False

# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations.  It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',

  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
non_configuration_keys = []

# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]

# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False

# Paths for converting filelist paths to output paths: {
#   toplevel,
#   qualified_output_dir,
# }
generator_filelist_paths = None

def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly.  Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file.  Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers.  It
  is used for recursion.

  The returned list will not contain any duplicate entries.  Each build file
  in the list will be relative to the current directory.
  """

  if included == None:
    included = []

  if build_file_path in included:
    return included

  included.append(build_file_path)

  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)

  return included

def CheckedEval(file_contents):
  """Return the eval of a gyp file.

  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.

  Note that this is slower than eval() is.
  """

  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  assert c1[0] is None
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  assert isinstance(c2[0], Discard)
  c3 = c2[0].getChildren()
  assert len(c3) == 1
  return CheckNode(c3[0], [])


def CheckNode(node, keypath):
  if isinstance(node, Dict):
    c = node.getChildren()
    dict = {}
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in dict:
        raise GypError("Key '" + key + "' repeated at level " +
              repr(len(keypath) + 1) + " with key path '" +
              '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      dict[key] = CheckNode(c[n + 1], kp)
    return dict
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
         "': " + repr(node))

def LoadOneBuildFile(build_file_path, data, aux_data, includes,
                     is_target, check):
  if build_file_path in data:
    return data[build_file_path]

  if os.path.exists(build_file_path):
    # Open the build file for read ('r') with universal-newlines mode ('U') to
    # make sure platform-specific newlines ('\r\n' or '\r') are converted to
    # '\n', which would otherwise cause eval() to fail.
    build_file_contents = open(build_file_path, 'rU').read()
  else:
    raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))

  build_file_data = None
  try:
    if check:
      build_file_data = CheckedEval(build_file_contents)
    else:
      build_file_data = eval(build_file_contents, {'__builtins__': None},
                             None)
  except SyntaxError, e:
    e.filename = build_file_path
    raise
  except Exception, e:
    gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
    raise

  if type(build_file_data) is not dict:
    raise GypError("%s does not evaluate to a dictionary." % build_file_path)

  data[build_file_path] = build_file_data
  aux_data[build_file_path] = {}

  # Scan for includes and merge them in.
  if ('skip_includes' not in build_file_data or
      not build_file_data['skip_includes']):
    try:
      if is_target:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, includes, check)
      else:
        LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                      aux_data, None, check)
    except Exception, e:
      gyp.common.ExceptionAppend(e,
                                 'while reading includes of ' + build_file_path)
      raise

  return build_file_data

def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  includes, check):
  includes_list = []
  if includes != None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']

  # Merge in the included files.
  for include in includes_list:
    if not 'included' in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)

    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)

    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, None, False, check),
               subdict_path, include)

  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if type(v) is dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
                                    None, check)
    elif type(v) is list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
                                    check)


# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
  for item in sublist:
    if type(item) is dict:
      LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
                                    None, check)
    elif type(item) is list:
      LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)

# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
  if 'targets' in data:
    target_list = data['targets']
    new_target_list = []
    for target in target_list:
      # If this target already has an explicit 'toolset', and no 'toolsets'
      # list, don't modify it further.
      if 'toolset' in target and 'toolsets' not in target:
        new_target_list.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      # Make sure this 'toolsets' definition is only processed once.
      if 'toolsets' in target:
        del target['toolsets']
      if len(toolsets) > 0:
        # Optimization: only do copies if more than one toolset is specified.
        for build in toolsets[1:]:
          new_target = gyp.simple_copy.deepcopy(target)
          new_target['toolset'] = build
          new_target_list.append(new_target)
        target['toolset'] = toolsets[0]
        new_target_list.append(target)
    data['targets'] = new_target_list
  if 'conditions' in data:
    for condition in data['conditions']:
      if type(condition) is list:
        for condition_dict in condition[1:]:
          if type(condition_dict) is dict:
            ProcessToolsetsInDict(condition_dict)

# TODO(mark): I don't love this name.  It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')

  # The 'target_build_files' key is only set when loading target build files in
  # the non-parallel code path, where LoadTargetBuildFile is called
  # recursively.  In the parallel code path, we don't need to check whether the
  # |build_file_path| has already been loaded, because the 'scheduled' set in
  # ParallelState guarantees that we never load the same |build_file_path|
  # twice.
  if 'target_build_files' in data:
    if build_file_path in data['target_build_files']:
      # Already loaded.
      return False
    data['target_build_files'].add(build_file_path)

  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)

  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
                                     includes, True, check)

  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth

  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')

  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)

  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)

  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)

  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)

  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)

    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults.  Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = gyp.simple_copy.deepcopy(
        build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1

    # No longer needed.
    del build_file_data['target_defaults']

  # Look for dependencies.  This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.

  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])

  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
          e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    return (build_file_path, dependencies)

def CallLoadTargetBuildFile(global_flags,
                            build_file_path, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.

     This wrapper is used when LoadTargetBuildFile is executed in
     a worker process.
  """

  try:
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value

    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, per_process_data,
                                 per_process_aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      return result

    (build_file_path, dependencies) = result

    # We can safely pop the build_file_data from per_process_data because it
    # will never be referenced by this process again, so we don't need to keep
    # it in the cache.
    build_file_data = per_process_data.pop(build_file_path)

    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            build_file_data,
            dependencies)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None

class ParallelProcessingError(Exception):
  pass


class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.

  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback function can have access to it.
  """

  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # The condition variable used to protect this object and notify
    # the main loop when there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel
    self.data = None
    # The number of parallel calls outstanding; decremented when a response
    # was received.
    self.pending = 0
    # The set of all build files that have been scheduled, so we don't
    # schedule the same one twice.
    self.scheduled = set()
    # A list of dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Flag to indicate if there was an error in a child process.
    self.error = False

  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.
    """
    self.condition.acquire()
    if not result:
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, build_file_data0, dependencies0) = result
    self.data[build_file_path0] = build_file_data0
    self.data['target_build_files'].add(build_file_path0)
    for new_dependency in dependencies0:
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()

def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info):
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data

  try:
    parallel_state.condition.acquire()
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        parallel_state.condition.wait()
        continue

      dependency = parallel_state.dependencies.pop()

      parallel_state.pending += 1
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}

      if not parallel_state.pool:
        parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    parallel_state.pool.terminate()
    raise e

  parallel_state.condition.release()

  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None

  if parallel_state.error:
    sys.exit(1)

# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple.  For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
  stack = []
  start = -1
  for index, char in enumerate(input_str):
    if char in LBRACKETS:
      stack.append(char)
      if start == -1:
        start = index
    elif char in BRACKETS:
      if not stack:
        return (-1, -1)
      if stack.pop() != BRACKETS[char]:
        return (-1, -1)
      if not stack:
        return (start, index + 1)
  return (-1, -1)


def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.
  """
  if type(string) is str:
    # This function is called a lot so for maximum performance, avoid
    # involving regexps which would otherwise make the code much
    # shorter. Regexps would need twice the time of this function.
    if string:
      if string == "0":
        return True
      if string[0] == "-":
        string = string[1:]
        if not string:
          return False
      if '1' <= string[0] <= '9':
        return string.isdigit()

  return False

# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
671
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
672
# In the last case, the inner "<()" is captured in match['content'].
673
early_variable_re = re.compile(
674
    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
675
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
676
    r'\((?P<is_array>\s*\[?)'
677
    r'(?P<content>.*?)(\]?)\))')
678
679
# This matches the same as early_variable_re, but with '>' instead of '<'.
680
late_variable_re = re.compile(
681
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
682
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
683
    r'\((?P<is_array>\s*\[?)'
684
    r'(?P<content>.*?)(\]?)\))')
685
686
# This matches the same as early_variable_re, but with '^' instead of '<'.
687
latelate_variable_re = re.compile(
688
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
689
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
690
    r'\((?P<is_array>\s*\[?)'
691
    r'(?P<content>.*?)(\]?)\))')
692
693
# Global cache of results from running commands so they don't have to be run
694
# more then once.
695
cached_command_results = {}
696
697
698
def FixupPlatformCommand(cmd):
699
  if sys.platform == 'win32':
700
    if type(cmd) is list:
701
      cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
702
    else:
703
      cmd = re.sub('^cat ', 'type ', cmd)
704
  return cmd
705
706
707
PHASE_EARLY = 0
708
PHASE_LATE = 1
709
PHASE_LATELATE = 2
710
711
def ExpandVariables(input, phase, variables, build_file):
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False

  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)

  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str

  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str

  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.

    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']

    # file_list is true if a | variant is used.
    file_list = '|' in match['type']

    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')

    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end

    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]

    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]

    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = gyp.simple_copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)

    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()

    # expand_to_list is true if an @ variant is used.  In that case,
    # the expansion should result in a list.  Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context.  Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement

    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string.  Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None

    # Support <|(listfile.txt ...) which generates a file
    # containing items from a gyp list, generated at gyp time.
    # This works around actions/rules which have more inputs than will
    # fit on the command line.
    if file_list:
      if type(contents) is list:
        contents_list = contents
      else:
        contents_list = contents.split(' ')
      replacement = contents_list[0]
      if os.path.isabs(replacement):
        raise GypError('| cannot handle absolute paths, got "%s"' % replacement)

      if not generator_filelist_paths:
        path = os.path.join(build_file_dir, replacement)
      else:
        if os.path.isabs(build_file_dir):
          toplevel = generator_filelist_paths['toplevel']
          rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
        else:
          rel_build_file_dir = build_file_dir
        qualified_out_dir = generator_filelist_paths['qualified_out_dir']
        path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
        gyp.common.EnsureDirExists(path)

      replacement = gyp.common.RelativePath(path, build_file_dir)
      f = gyp.common.WriteOnDiff(path)
      for i in contents_list[1:]:
        f.write('%s\n' % i)
      f.close()

    elif run_command:
      use_shell = True
      if match['is_array']:
        contents = eval(contents)
        use_shell = False

      # Check for a cached value to avoid executing commands, or generating
      # file lists more than once. The cache key contains the command to be
      # run as well as the directory to run it from, to account for commands
      # that depend on their current directory.
      # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
      # someone could author a set of GYP files where each time the command
      # is invoked it produces different output by design. When the need
      # arises, the syntax should be extended to support no caching off a
      # command's output so it is run every time.
      cache_key = (str(contents), build_file_dir)
      cached_value = cached_command_results.get(cache_key, None)
      if cached_value is None:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Executing command '%s' in directory '%s'",
                        contents, build_file_dir)

        replacement = ''

        if command_string == 'pymod_do_main':
          # <!pymod_do_main(modulename param eters) loads |modulename| as a
          # python module and then calls that module's DoMain() function,
          # passing ["param", "eters"] as a single list argument. For modules
          # that don't load quickly, this can be faster than
          # <!(python modulename param eters). Do this in |build_file_dir|.
          oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
          if build_file_dir:  # build_file_dir may be None (see above).
            os.chdir(build_file_dir)
          try:

            parsed_contents = shlex.split(contents)
            try:
              py_module = __import__(parsed_contents[0])
            except ImportError as e:
              raise GypError("Error importing pymod_do_main "
                             "module (%s): %s" % (parsed_contents[0], e))
            replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
          finally:
            os.chdir(oldwd)
          assert replacement != None
        elif command_string:
          raise GypError("Unknown command string '%s' in '%s'." %
                         (command_string, contents))
        else:
          # Fix up command with platform specific workarounds.
          contents = FixupPlatformCommand(contents)
          try:
            p = subprocess.Popen(contents, shell=use_shell,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 cwd=build_file_dir)
          except Exception, e:
            raise GypError("%s while executing command '%s' in %s" %
                           (e, contents, build_file))

          p_stdout, p_stderr = p.communicate('')

          if p.wait() != 0 or p_stderr:
            sys.stderr.write(p_stderr)
            # Simulate check_call behavior, since check_call only exists
            # in python 2.5 and later.
            raise GypError("Call to '%s' returned exit status %d while in %s." %
                           (contents, p.returncode, build_file))
          replacement = p_stdout.rstrip()

        cached_command_results[cache_key] = replacement
      else:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Had cache value for command '%s' in directory '%s'",
                        contents, build_file_dir)
        replacement = cached_value

    else:
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]

    if type(replacement) is list:
      for item in replacement:
        if not contents[-1] == '/' and type(item) not in (str, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it.  Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif type(replacement) not in (str, int):
      raise GypError('Variable ' + contents +
                     ' must expand to a string or list of strings; ' +
                     'found a ' + replacement.__class__.__name__)

    if expand_to_list:
      # Expanding in list context.  It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement.  See
      # above.
      if type(replacement) is list:
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if type(replacement) is list:
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct.  This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement

      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output

  if output == input:
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found only identity matches on %r, avoiding infinite "
                    "recursion.",
                    output)
  else:
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
    if type(output) is list:
      if output and type(output[0]) is list:
        # Leave output alone if it's a list of lists.
        # We don't want such lists to be stringified.
        pass
      else:
        new_output = []
        for item in output:
          new_output.append(
              ExpandVariables(item, phase, variables, build_file))
        output = new_output
    else:
      output = ExpandVariables(output, phase, variables, build_file)

  # Convert all strings that are canonically-represented integers into
  # integers.
  if type(output) is list:
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  return output

# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}

def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Returns the dict that should be used or None if the result was
  that nothing should be used."""
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) < 2:
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError.  That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be at least length 2, not ' + str(len(condition)))

  i = 0
  result = None
  while i < len(condition):
    cond_expr = condition[i]
    true_dict = condition[i + 1]
    if type(true_dict) is not dict:
      raise GypError('{} {} must be followed by a dictionary, not {}'.format(
        conditions_key, cond_expr, type(true_dict)))
    if len(condition) > i + 2 and type(condition[i + 2]) is dict:
      false_dict = condition[i + 2]
      i = i + 3
      if i != len(condition):
        raise GypError('{} {} has {} unexpected trailing items'.format(
          conditions_key, cond_expr, len(condition) - i))
    else:
      false_dict = None
      i = i + 2
    if result == None:
      result = EvalSingleCondition(
          cond_expr, true_dict, false_dict, phase, variables, build_file)

  return result


def EvalSingleCondition(
    cond_expr, true_dict, false_dict, phase, variables, build_file):
  """Returns true_dict if cond_expr evaluates to true, and false_dict
  otherwise."""
  # Do expansions on the condition itself.  Since the condition can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
          'Variable expansion in this context permits str and int ' + \
          'only, found ' + cond_expr_expanded.__class__.__name__)

  try:
    if cond_expr_expanded in cached_conditions_asts:
      ast_code = cached_conditions_asts[cond_expr_expanded]
    else:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  except SyntaxError, e:
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError, e:
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)

def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  # Process a 'conditions' or 'target_conditions' section in the_dict,
  # depending on phase.
  # early -> conditions
  # late -> target_conditions
  # latelate -> no conditions
  #
  # Each item in a conditions list consists of cond_expr, a string expression
  # evaluated as the condition, and true_dict, a dict that will be merged into
  # the_dict if cond_expr evaluates to true.  Optionally, a third item,
  # false_dict, may be present.  false_dict is merged into the_dict if
  # cond_expr evaluates to false.
  #
  # Any dict merged into the_dict will be recursively processed for nested
  # conditionals and other expansions, also according to phase, immediately
  # prior to being merged.

  if phase == PHASE_EARLY:
    conditions_key = 'conditions'
  elif phase == PHASE_LATE:
    conditions_key = 'target_conditions'
  elif phase == PHASE_LATELATE:
    return
  else:
    assert False

  if not conditions_key in the_dict:
    return

  conditions_list = the_dict[conditions_key]
  # Unhook the conditions list, it's no longer needed.
  del the_dict[conditions_key]

  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)

    if merge_dict != None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
      ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                          variables, build_file)

      MergeDicts(the_dict, merge_dict, build_file, build_file)

def LoadAutomaticVariablesFromDict(variables, the_dict):
  # Any keys with plain string values in the_dict become automatic variables.
  # The variable name is the key name with a "_" character prepended.
  for key, value in the_dict.iteritems():
    if type(value) in (str, int, list):
      variables['_' + key] = value


def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  # Any keys in the_dict's "variables" dict, if it has one, become variables.
  # The variable name is the key name in the "variables" dict.
  # Variables that end with the % character are set only if they are unset in
  # the variables dict.  the_dict_key is the name of the key that accesses
  # the_dict in the_dict's parent dict.  If the_dict's parent is not a dict
  # (it could be a list or it could be parentless because it is a root dict),
  # the_dict_key will be None.
  for key, value in the_dict.get('variables', {}).iteritems():
    if type(value) not in (str, int, list):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value

def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.
  """

  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError(
              'Variable expansion in this context permits str and int ' + \
              'only, found ' + expanded.__class__.__name__ + ' for ' + key)
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a "variables"
  # section will only have those variables effective in subdicts, not in
  # the_dict.  The workaround is to put a "conditions" section within a
  # "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions section
  # from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      raise TypeError('Unknown type ' + value.__class__.__name__ + \
                      ' for ' + key)

def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        the_list[index:index+1] = expanded
        index += len(expanded)

        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        raise ValueError(
              'Variable expansion in this context permits strings and ' + \
              'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
              str(index))
    elif type(item) is not int:
      raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
    index = index + 1

1335
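

# Illustrative sketch (hypothetical helper, not part of GYP's API):
# demonstrates the in-place splice used above when a string item expands to a
# list.  The slice assignment replaces one item with several, and index is
# advanced past the inserted items so each is examined exactly once.
def _ExampleListSplice():
  the_list = ['a', '<(names)', 'z']
  index = 1
  expanded = ['b', 'c']        # pretend ExpandVariables returned this list
  the_list[index:index+1] = expanded
  index += len(expanded)       # index now points at 'z'
  assert the_list == ['a', 'b', 'c', 'z'] and the_list[index] == 'z'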


def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """

  targets = {}
  for build_file in data['target_build_files']:
    for target in data[build_file].get('targets', []):
      target_name = gyp.common.QualifiedTarget(build_file,
                                               target['target_name'],
                                               target['toolset'])
      if target_name in targets:
        raise GypError('Duplicate target definitions for ' + target_name)
      targets[target_name] = target

  return targets
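

# Illustrative sketch (assumed example values): the fully-qualified names used
# as keys take the form "build_file:target_name#toolset", so two targets may
# share a name as long as they live in different build files or toolsets.
def _ExampleQualifiedName():
  name = gyp.common.QualifiedTarget('dir/app.gyp', 'app', 'target')
  assert name == 'dir/app.gyp:app#target'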


def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """

  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  for target, target_dict in targets.iteritems():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      for index in xrange(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies" also
        # appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
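

# Illustrative sketch: the comprehension above crosses every dependency
# section with the '', '!', and '/' operator suffixes, so the exclusion and
# regex variants of each section get qualified too.
def _ExampleAllDependencySections():
  sections = [dep + op
              for dep in ['dependencies']
              for op in ('', '!', '/')]
  assert sections == ['dependencies', 'dependencies!', 'dependencies/']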


def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.
  """

  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
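

# Illustrative sketch (assumed data): a "lib/lib.gyp:*#target" wildcard in a
# target's dependencies expands to every target defined in lib/lib.gyp except
# those marked suppress_wildcard.
def _ExampleExpandWildcardDependencies():
  targets = {
    'app/app.gyp:app#target': {
      'toolset': 'target',
      'dependencies': ['lib/lib.gyp:*#target'],
    },
  }
  data = {
    'lib/lib.gyp': {
      'targets': [
        {'target_name': 'base', 'toolset': 'target'},
        {'target_name': 'hidden', 'toolset': 'target',
         'suppress_wildcard': 1},
      ],
    },
  }
  ExpandWildcardDependencies(targets, data)
  assert (targets['app/app.gyp:app#target']['dependencies'] ==
          ['lib/lib.gyp:base#target'])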


def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  seen = {}
  return [seen.setdefault(e, e) for e in l if e not in seen]
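

# Illustrative sketch: Unify preserves order and keeps the earliest copy of
# each element.
def _ExampleUnify():
  assert Unify(['a', 'b', 'a', 'c', 'b']) == ['a', 'b', 'c']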


def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets' dependency
  lists."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        target_dict[dependency_key] = Unify(dependencies)


def Filter(l, item):
  """Returns a copy of l with every occurrence of item removed."""
  return [e for e in l if e != item]


def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the prune_self_dependency
  variable set."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if t == target_name:
            if targets[t].get('variables', {}).get('prune_self_dependency', 0):
              target_dict[dependency_key] = Filter(dependencies, target_name)


def RemoveLinkDependenciesFromNoneTargets(targets):
  """Remove dependencies having the 'link_dependency' attribute from the 'none'
  targets."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if target_dict.get('type', None) == 'none':
            if targets[t].get('variables', {}).get('link_dependency', 0):
              target_dict[dependency_key] = \
                  Filter(target_dict[dependency_key], t)


class DependencyGraphNode(object):
  """A node in the dependency graph used to order and analyze targets.

  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """

  class CircularException(GypError):
    pass

  def __init__(self, ref):
    self.ref = ref
    self.dependencies = []
    self.dependents = []

  def __repr__(self):
    return '<DependencyGraphNode: %r>' % self.ref

  def FlattenToList(self):
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes.  Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = OrderedSet()

    # in_degree_zeros is the set of DependencyGraphNodes that have no
    # dependencies not in flat_list.  Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
      # can be appended to flat_list.  Take these nodes out of in_degree_zeros
      # as work progresses, so that the next node to process from the list can
      # always be accessed at a consistent position.
      node = in_degree_zeros.pop()
      flat_list.add(node.ref)

      # Look at dependents of the node just added to flat_list.  Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        # TODO: We want to check through the
        # node_dependent.dependencies list but if it's long and we
        # always start at the beginning, then we get O(n^2) behaviour.
        for node_dependent_dependency in node_dependent.dependencies:
          if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list.
            # There will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list.  Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)

    return list(flat_list)

  def FindCycles(self):
    """
    Returns a list of cycles in the graph, where each cycle is its own list.
    """
    results = []
    visited = set()

    def Visit(node, path):
      for child in node.dependents:
        if child in path:
          results.append([child] + path[:path.index(child) + 1])
        elif not child in visited:
          visited.add(child)
          Visit(child, [child] + path)

    visited.add(self)
    Visit(self, [self])

    return results

  def DirectDependencies(self, dependencies=None):
    """Returns a list of just direct dependencies."""
    if dependencies is None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)

    return dependencies

  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.

    This method does not operate on self.  Rather, it operates on the list
    of dependencies in the |dependencies| argument.  For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list.  As new items are added to the list, they too will
    be processed, so it is possible to import settings through multiple levels
    of dependencies.

    This method is not terribly useful on its own; it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies.  DirectAndImportedDependencies is intended to be the
    public entry point.
    """

    if dependencies is None:
      dependencies = []

    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present.  Newly-added items will be checked for
      # their own imports when the list iteration reaches them.
      # Rather than simply appending new items, insert them after the
      # dependency that exported them.  This is done to more closely match
      # the depth-first method used by DeepDependencies.
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1

    return dependencies

  def DirectAndImportedDependencies(self, targets, dependencies=None):
    """Returns a list of a target's direct dependencies, plus any indirect
    dependencies whose settings a direct dependency has advertised (via
    export_dependent_settings) should be exported through it.
    """

    dependencies = self.DirectDependencies(dependencies)
    return self._AddImportedDependencies(targets, dependencies)

  def DeepDependencies(self, dependencies=None):
    """Returns an OrderedSet of all of a target's dependencies, recursively."""
    if dependencies is None:
      # Using an OrderedSet for ordered output and fast "is it already added"
      # checks.
      dependencies = OrderedSet()

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is None:
        continue
      if dependency.ref not in dependencies:
        dependency.DeepDependencies(dependencies)
        dependencies.add(dependency.ref)

    return dependencies

  def _LinkDependenciesInternal(self, targets, include_shared_libraries,
                                dependencies=None, initial=True):
    """Returns an OrderedSet of dependency targets that are linked
    into this target.

    This function has a split personality, depending on the setting of
    |initial|.  Outside callers should always leave |initial| at its default
    setting.

    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being built.

    If |include_shared_libraries| is False, the resulting dependencies will not
    include shared_library targets that are linked into this target.
    """
    if dependencies is None:
      # Using an OrderedSet for ordered output and fast "is it already added"
      # checks.
      dependencies = OrderedSet()

    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies

    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so that
    # this function can find target types.

    if 'target_name' not in targets[self.ref]:
      raise GypError("Missing 'target_name' field in target.")

    if 'type' not in targets[self.ref]:
      raise GypError("Missing 'type' field in target %s" %
                     targets[self.ref]['target_name'])

    target_type = targets[self.ref]['type']

    is_linkable = target_type in linkable_types

    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies

    # Don't traverse 'none' targets if explicitly excluded.
    if (target_type == 'none' and
        not targets[self.ref].get('dependencies_traverse', True)):
      dependencies.add(self.ref)
      return dependencies

    # Executables, mac kernel extensions and loadable modules are already fully
    # and finally linked.  Nothing else can be a link dependency of them; there
    # can only be dependencies in the sense that a dependent target might run
    # an executable or load the loadable_module.
    if not initial and target_type in ('executable', 'loadable_module',
                                       'mac_kernel_extension'):
      return dependencies

    # Shared libraries are already fully linked.  They should only be included
    # in |dependencies| when adjusting static library dependencies (in order to
    # link against the shared_library's import lib), but should not be included
    # in |dependencies| when propagating link_settings.
    # The |include_shared_libraries| flag controls which of these two cases we
    # are handling.
    if (not initial and target_type == 'shared_library' and
        not include_shared_libraries):
      return dependencies

    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      dependencies.add(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked into
        # this linkable target.  Always look at dependencies of the initial
        # target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency._LinkDependenciesInternal(targets,
                                               include_shared_libraries,
                                               dependencies, False)

    return dependencies

  def DependenciesForLinkSettings(self, targets):
    """
    Returns a list of dependency targets whose link_settings should be merged
    into this target.
    """

    # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
    # link_settings are propagated.  So for now, we will allow it, unless the
    # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
    # False.  Once chrome is fixed, we can remove this flag.
    include_shared_libraries = \
        targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
    return self._LinkDependenciesInternal(targets, include_shared_libraries)

  def DependenciesToLinkAgainst(self, targets):
    """
    Returns a list of dependency targets that are linked into this target.
    """
    return self._LinkDependenciesInternal(targets, True)
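

# Illustrative sketch (assumed wiring, mirroring what BuildDependencyList does
# below): a two-node graph where "a" depends on "b".  FlattenToList() returns
# dependencies before their dependents.
def _ExampleFlattenToList():
  root = DependencyGraphNode(None)
  node_a = DependencyGraphNode('a')
  node_b = DependencyGraphNode('b')
  # "b" has no dependencies, so it hangs off the root node.
  node_b.dependencies.append(root)
  root.dependents.append(node_b)
  # "a" depends on "b".
  node_a.dependencies.append(node_b)
  node_b.dependents.append(node_a)
  assert root.FlattenToList() == ['b', 'a']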


def BuildDependencyList(targets):
  # Create a DependencyGraphNode for each target.  Put it into a dict for easy
  # access.
  dependency_nodes = {}
  for target, spec in targets.iteritems():
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)

  # Set up the dependency links.  Targets that have no dependencies are treated
  # as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    target_build_file = gyp.common.BuildFile(target)
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(targets):
    if not root_node.dependents:
      # If all targets have dependencies, add the first target as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      target = targets.keys()[0]
      target_node = dependency_nodes[target]
      target_node.dependencies.append(root_node)
      root_node.dependents.append(target_node)

    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in dependency graph detected:\n' + '\n'.join(cycles))

  return [dependency_nodes, flat_list]


def VerifyNoGYPFileCircularDependencies(targets):
  # Create a DependencyGraphNode for each gyp file containing a target.  Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets.iterkeys():
    build_file = gyp.common.BuildFile(target)
    if not build_file in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)

  # Set up the dependency links.
  for target, spec in targets.iteritems():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      except GypError, e:
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise

      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)

  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.itervalues():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(dependency_nodes):
    if not root_node.dependents:
      # If all files have dependencies, add the first file as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      file_node = dependency_nodes.values()[0]
      file_node.dependencies.append(root_node)
      root_node.dependents.append(file_node)
    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))


def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  # key should be one of all_dependent_settings, direct_dependent_settings,
  # or link_settings.

  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)

    if key == 'all_dependent_settings':
      dependencies = dependency_nodes[target].DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = \
          dependency_nodes[target].DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                      'dependencies for ' + key)

    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if not key in dependency_dict:
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)


def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  # Recompute target "dependencies" properties.  For each static library
  # target, remove "dependencies" entries referring to other static libraries,
  # unless the dependency has the "hard_dependency" attribute set.  For each
  # linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output.  Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]

        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1

      # Update the dependencies.  If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target.  Add them to the dependencies list if they're not already
      # present.

      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
      # Sort the dependencies list in the order from dependents to
      # dependencies.  e.g. If A and B depend on C and C depends on D, sort
      # them in A, B, C, D.  Note: flat_list is already sorted in the order
      # from dependencies to dependents.
      if sort_dependencies and 'dependencies' in target_dict:
        target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                       if dep in target_dict['dependencies']]


# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')


def MakePathRelative(to_file, fro_file, item):
  # If item is a relative path, it's relative to the build file dict that it's
  # coming from.  Fix it up to make it relative to the build file dict that
  # it's going into.
  # Exception: any |item| that begins with these special characters is
  # returned without modification.
  #   /   Used when a path is already absolute (shortcut optimization;
  #       such paths would be returned as absolute anyway)
  #   $   Used for build environment variables
  #   -   Used for some build environment flags (such as -lapr-1 in a
  #       "libraries" section)
  #   <   Used for our own variable and command expansions (see ExpandVariables)
  #   >   Used for our own variable and command expansions (see ExpandVariables)
  #   ^   Used for our own variable and command expansions (see ExpandVariables)
  #
  #   "/' Used when a value is quoted.  If these are present, then we
  #       check the second character instead.
  #
  if to_file == fro_file or exception_re.match(item):
    return item
  else:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure.  This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    ret = os.path.normpath(os.path.join(
        gyp.common.RelativePath(os.path.dirname(fro_file),
                                os.path.dirname(to_file)),
                                item)).replace('\\', '/')
    if item[-1] == '/':
      ret += '/'
    return ret
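

# Illustrative sketch (assumed paths): an item moving from a/b/b.gyp into
# a/a.gyp is rebased so it still points at the same file, while items that
# begin with one of the exception characters pass through untouched.
def _ExampleMakePathRelative():
  assert MakePathRelative('a/a.gyp', 'a/b/b.gyp', 'c.cc') == 'b/c.cc'
  assert MakePathRelative('a/a.gyp', 'a/b/b.gyp', '-lm') == '-lm'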


def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # The Python documentation recommends that objects which do not support
  # hashing set __hash__ to None.  Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__

  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l

  prepend_index = 0

  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if type(item) in (str, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item

      if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif type(item) is dict:
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif type(item) is list:
      # Recurse, making a copy of the list.  If the list contains any
      # descendant dicts, path fixing will occur.  Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|.  append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError(
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__)

    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend.  This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)

      # Don't just insert everything at index 0.  That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
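

# Illustrative sketch: appending keeps the earliest copy of a singleton in
# place, while prepending removes an existing copy and moves it to the front.
def _ExampleMergeLists():
  to = ['a', 'b']
  MergeLists(to, ['b', 'c'], 'x.gyp', 'x.gyp')
  assert to == ['a', 'b', 'c']         # singleton 'b' stays at its position
  to = ['a', 'b']
  MergeLists(to, ['b', 'c'], 'x.gyp', 'x.gyp', append=False)
  assert to == ['b', 'c', 'a']         # existing 'b' removed, then prepended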


def MergeDicts(to, fro, to_file, fro_file):
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics.  Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if type(v) in (str, int):
        if type(to[k]) not in (str, int):
          bad_merge = True
      elif type(v) is not type(to[k]):
        bad_merge = True

      if bad_merge:
        raise TypeError(
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k)
    if type(v) in (str, int):
      # Overwrite the existing value, if any.  Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif type(v) is dict:
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif type(v) is list:
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has this      ...the to-list will have this action
      # character appended:              applied when receiving the from-list:
      #                           =      replace
      #                           +      prepend
      #                           ?      set, only if to-list does not yet exist
      #                      (none)      append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example.  Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        elif type(to[list_base]) is not list:
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError(
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + ' (' + k + ')')
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed); there will be no
      # subsequent dict "merging" once entering a list, because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError(
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k)
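

# Illustrative sketch of the list-merge policies (assumed key names): '='
# replaces the to-list, a bare key appends, and '?' only sets the list when
# it doesn't already exist.
def _ExampleMergeDictsListPolicies():
  to = {'defines': ['A'], 'cflags': ['-O2']}
  fro = {'defines=': ['B'], 'cflags': ['-g'], 'ldflags?': ['-s']}
  MergeDicts(to, fro, 'x.gyp', 'x.gyp')
  assert to['defines'] == ['B']          # replaced
  assert to['cflags'] == ['-O2', '-g']   # appended
  assert to['ldflags'] == ['-s']         # set because it didn't exist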


def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  # Skip if previously visited.
  if configuration in visited:
    return

  # Look at this configuration.
  configuration_dict = target_dict['configurations'][configuration]

  # Merge in parents.
  for parent in configuration_dict.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, visited + [configuration])

  # Merge it into the new config.
  MergeDicts(new_configuration_dict, configuration_dict,
             build_file, build_file)

  # Drop abstract.
  if 'abstract' in new_configuration_dict:
    del new_configuration_dict['abstract']
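

# Illustrative sketch (assumed configuration layout): "Release" inherits from
# an abstract "Common" configuration, so the parent's settings are merged in
# first and the 'abstract' marker is dropped from the result.
def _ExampleConfigInheritance():
  target_dict = {
    'configurations': {
      'Common': {'abstract': 1, 'defines': ['COMMON']},
      'Release': {'inherit_from': ['Common'], 'defines': ['RELEASE']},
    },
  }
  merged = {}
  MergeConfigWithInheritance(merged, 'x.gyp', target_dict, 'Release', [])
  assert merged['defines'] == ['COMMON', 'RELEASE']
  assert 'abstract' not in merged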


def SetUpConfigurations(target, target_dict):
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /).  Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']

  build_file = gyp.common.BuildFile(target)

  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    concrete = [i for (i, config) in target_dict['configurations'].iteritems()
                if not config.get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]

  merged_configurations = {}
  configs = target_dict['configurations']
  for (configuration, old_configuration_dict) in configs.iteritems():
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = {}
    for (key, target_val) in target_dict.iteritems():
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if not key_base in non_configuration_keys:
        new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)

    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])

    merged_configurations[configuration] = new_configuration_dict

  # Put the new configurations back into the target dict as a configuration.
  for configuration in merged_configurations.keys():
    target_dict['configurations'][configuration] = (
        merged_configurations[configuration])

  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]

  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]

  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))


def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".  Every item in such a list is removed from the associated
  main list, which in this example, would be "sources".  Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".
  """

  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # them into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.

  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue

    if type(value) is not list:
      raise ValueError(name + ' key ' + key + ' must be list, not ' + \
                       value.__class__.__name__)

    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue

    if type(the_dict[list_key]) is not list:
      value = the_dict[list_key]
      raise ValueError(name + ' key ' + list_key + \
                       ' must be list, not ' + \
                       value.__class__.__name__ + ' when applying ' + \
                       {'!': 'exclusion', '/': 'regex'}[operation])

    if not list_key in lists:
      lists.append(list_key)

  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]

  for list_key in lists:
    the_list = the_dict[list_key]

    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = [-1] * len(the_list)

    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0

      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]

    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)

        if action == 'exclude':
          # Items matching this regex will have their action set to 0
          # (exclude).
          action_value = 0
        elif action == 'include':
          # Items matching this regex will have their action set to 1
          # (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError('Unrecognized action ' + action + ' in ' + name + \
                           ' key ' + regex_key)

        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value

      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]

    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)

    excluded_list = []

    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]

    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list

  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if type(value) is dict:
      ProcessListFiltersInDict(key, value)
    elif type(value) is list:
      ProcessListFiltersInList(key, value)
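

# Illustrative sketch (assumed file names): an exclusion list plus a regex
# exclude/include pair applied to a "sources" list; excluded items land in
# "sources_excluded" and the filter keys are consumed.
def _ExampleListFilters():
  the_dict = {
    'sources': ['a.cc', 'b_win.cc', 'c_mac.cc'],
    'sources!': ['a.cc'],
    'sources/': [['exclude', '_(mac|win)\\.cc$'],
                 ['include', '_mac\\.cc$']],
  }
  ProcessListFiltersInDict('example', the_dict)
  assert the_dict['sources'] == ['c_mac.cc']
  assert the_dict['sources_excluded'] == ['a.cc', 'b_win.cc']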


def ProcessListFiltersInList(name, the_list):
  for item in the_list:
    if type(item) is dict:
      ProcessListFiltersInDict(name, item)
    elif type(item) is list:
      ProcessListFiltersInList(name, item)


def ValidateTargetType(target, target_dict):
  """Ensures the 'type' field on the target is one of the known types.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises an exception on error.
  """
  VALID_TARGET_TYPES = ('executable', 'loadable_module',
                        'static_library', 'shared_library',
                        'mac_kernel_extension', 'none')
  target_type = target_dict.get('type', None)
  if target_type not in VALID_TARGET_TYPES:
    raise GypError("Target %s has an invalid target type '%s'.  "
                   "Must be one of %s." %
                   (target, target_type, '/'.join(VALID_TARGET_TYPES)))
  if (target_dict.get('standalone_static_library', 0) and
      not target_type == 'static_library'):
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check):
  """Ensures that a static_library target has no duplicate basenames among
  its compiled sources, which libtool on Mac cannot handle.
  """
  if not duplicate_basename_check:
    return
  if target_dict.get('type', None) != 'static_library':
    return
  sources = target_dict.get('sources', [])
  basenames = {}
  for source in sources:
    name, ext = os.path.splitext(source)
    is_compiled_file = ext in [
        '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
    if not is_compiled_file:
      continue
    basename = os.path.basename(name)  # Don't include extension.
    basenames.setdefault(basename, []).append(source)

  error = ''
  for basename, files in basenames.iteritems():
    if len(files) > 1:
      error += '  %s: %s\n' % (basename, ' '.join(files))

  if error:
    print('static library %s has several files with the same basename:\n' %
          target + error + 'libtool on Mac cannot handle that. Use '
          '--no-duplicate-basename-check to disable this validation.')
    raise GypError('Duplicate basenames in sources section, see list above')


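# Illustrative example (assumed, not from the original source): a
# static_library target with 'sources': ['foo/util.cc', 'bar/util.cc'] fails
# ValidateSourcesInTarget because both files share the basename 'util';
# non-compiled files such as headers are ignored by the check.

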
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """

  # Dicts to map between values found in rules' 'rule_name' and 'extension'
  # keys and the rule dicts themselves.
  rule_names = {}
  rule_extensions = {}

  rules = target_dict.get('rules', [])
  for rule in rules:
    # Make sure that there's no conflict among rule names and extensions.
    rule_name = rule['rule_name']
    if rule_name in rule_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    rule_names[rule_name] = rule

    rule_extension = rule['extension']
    if rule_extension.startswith('.'):
      rule_extension = rule_extension[1:]
    if rule_extension in rule_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (rule_extension, target,
                      rule_extensions[rule_extension]['rule_name'],
                      rule_name))
    rule_extensions[rule_extension] = rule

    # Make sure rule_sources isn't already there.  It's going to be
    # created below if needed.
    if 'rule_sources' in rule:
      raise GypError(
            'rule_sources must not exist in input, target %s rule %s' %
            (target, rule_name))

    rule_sources = []
    source_keys = ['sources']
    source_keys.extend(extra_sources_for_rules)
    for source_key in source_keys:
      for source in target_dict.get(source_key, []):
        (source_root, source_extension) = os.path.splitext(source)
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == rule_extension:
          rule_sources.append(source)

    if len(rule_sources) > 0:
      rule['rule_sources'] = rule_sources


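# Illustrative example (assumed, not from the original source): given a rule
#   {'rule_name': 'idl', 'extension': 'idl', ...}
# and 'sources': ['a.idl', 'b.cc', 'c.idl'], ValidateRulesInTarget adds
# 'rule_sources': ['a.idl', 'c.idl'] to that rule dict.

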
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Ensures that the optional 'run_as' section in target_dict, if present,
  is well-formed.
  """
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    return
  if type(run_as) is not dict:
    raise GypError("The 'run_as' in target %s from file %s should be a "
                   "dictionary." %
                   (target_name, build_file))
  action = run_as.get('action')
  if not action:
    raise GypError("The 'run_as' in target %s from file %s must have an "
                   "'action' section." %
                   (target_name, build_file))
  if type(action) is not list:
    raise GypError("The 'action' for 'run_as' in target %s from file %s "
                   "must be a list." %
                   (target_name, build_file))
  working_directory = run_as.get('working_directory')
  if working_directory and type(working_directory) is not str:
    raise GypError("The 'working_directory' for 'run_as' in target %s "
                   "in file %s should be a string." %
                   (target_name, build_file))
  environment = run_as.get('environment')
  if environment and type(environment) is not dict:
    raise GypError("The 'environment' for 'run_as' in target %s "
                   "in file %s should be a dictionary." %
                   (target_name, build_file))


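# Illustrative example (assumed, not from the original source): a minimal
# 'run_as' section that passes ValidateRunAsInTarget:
#   'run_as': {
#     'action': ['python', 'run_tests.py'],   # required, must be a list
#     'working_directory': '.',               # optional, must be a string
#     'environment': {'LANG': 'C'},           # optional, must be a dict
#   }

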
def ValidateActionsInTarget(target, target_dict, build_file):
  '''Validates the inputs to the actions in a target.'''
  target_name = target_dict.get('target_name')
  actions = target_dict.get('actions', [])
  for action in actions:
    action_name = action.get('action_name')
    if not action_name:
      raise GypError("Anonymous action in target %s.  "
                     "An action must have an 'action_name' field." %
                     target_name)
    inputs = action.get('inputs', None)
    if inputs is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    action_command = action.get('action')
    if action_command and not action_command[0]:
      raise GypError("Empty action as command in target %s." % target_name)


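# Illustrative example (assumed, not from the original source): the smallest
# action dict that passes ValidateActionsInTarget looks like
#   {'action_name': 'gen', 'inputs': [], 'action': ['python', 'gen.py']}
# an empty 'inputs' list is fine, but a missing 'inputs' key is an error.

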
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.
  """
  # Use items instead of iteritems because there's no need to try to look at
  # reinserted keys and their associated values.
  for k, v in the_dict.items():
    if type(v) is int:
      v = str(v)
      the_dict[k] = v
    elif type(v) is dict:
      TurnIntIntoStrInDict(v)
    elif type(v) is list:
      TurnIntIntoStrInList(v)

    if type(k) is int:
      del the_dict[k]
      the_dict[str(k)] = v


def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.
  """
  for index in xrange(0, len(the_list)):
    item = the_list[index]
    if type(item) is int:
      the_list[index] = str(item)
    elif type(item) is dict:
      TurnIntIntoStrInDict(item)
    elif type(item) is list:
      TurnIntIntoStrInList(item)


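# Illustrative example (not from the original source): after
#   d = {'foo': 1, 2: [3, {'bar': 4}]}
#   TurnIntIntoStrInDict(d)
# d is {'foo': '1', '2': ['3', {'bar': '4'}]} -- keys and values alike.

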
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|."""
  qualified_root_targets = []
  for target in root_targets:
    target = target.strip()
    qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
    if not qualified_targets:
      raise GypError("Could not find target %s" % target)
    qualified_root_targets.extend(qualified_targets)

  wanted_targets = {}
  for target in qualified_root_targets:
    wanted_targets[target] = targets[target]
    for dependency in dependency_nodes[target].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]

  wanted_flat_list = [t for t in flat_list if t in wanted_targets]

  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if not 'targets' in data[build_file]:
      continue
    new_targets = []
    for target in data[build_file]['targets']:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      if qualified_name in wanted_targets:
        new_targets.append(target)
    data[build_file]['targets'] = new_targets

  return wanted_targets, wanted_flat_list


def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.
  """
  # Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
  used = {}
  for target in targets:
    # Separate out 'path/to/file.gyp', 'target_name' from
    # 'path/to/file.gyp:target_name'.
    path, name = target.rsplit(':', 1)
    # Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
    subdir, gyp = os.path.split(path)
    # Use '.' for the current directory '', so that the error messages make
    # more sense.
    if not subdir:
      subdir = '.'
    # Prepare a key like 'path/to:target_name'.
    key = subdir + ':' + name
    if key in used:
      # Complain if this target is already used.
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
    used[key] = gyp


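# Illustrative example (assumed, not from the original source):
# 'chrome/a.gyp:base' and 'chrome/b.gyp:base' collide in
# VerifyNoCollidingTargets because both targets named 'base' live in the
# directory 'chrome'; 'chrome/a.gyp:base' and 'net/a.gyp:base' do not.

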
def SetGeneratorGlobals(generator_input_info):
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
  global path_sections
  path_sections = set(base_path_sections)
  path_sections.update(generator_input_info['path_sections'])

  global non_configuration_keys
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])

  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']

  global generator_filelist_paths
  generator_filelist_paths = generator_input_info['generator_filelist_paths']


def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, duplicate_basename_check, parallel, root_targets):
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']

  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info)
  else:
    aux_data = {}
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise

  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)

  # Fully qualify all dependency links.
  QualifyDependencies(targets)

  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)

  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)

  # Remove all dependencies marked as 'link_dependency' from the targets of
  # type 'none'.
  RemoveLinkDependenciesFromNoneTargets(targets)

  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]

  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)

  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)

  [dependency_nodes, flat_list] = BuildDependencyList(targets)

  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)

  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)

  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)

    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]

  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])

  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)

  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)

  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)

  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)

  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)

  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)

  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]


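# Illustrative sketch of a Load call (argument values are assumptions, not
# taken from this file):
#   flat_list, targets, data = Load(
#       ['foo/foo.gyp'], variables={}, includes=[], depth='.',
#       generator_input_info=generator_input_info, check=False,
#       circular_check=True, duplicate_basename_check=True,
#       parallel=False, root_targets=None)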