FLEXPART Testing Environment CTBTO WO8
 All Classes Namespaces Files Functions Variables Pages
read_header.py
Go to the documentation of this file.
1 #!/usr/bin/env python
2 """
3 
4 D. Arnold
5 D. Morton
6 
7 This is taken from a version of Don Morton of pflexible
8 For original pflexible contact J. Bukhardt
9 
10 and adapted so that we only have the reading of the header
11 for FLEXPART version 8.X and further. Some other things have
12 been modified. Classes and helper functions also from pflexible
13 """
14 # builtin imports
15 import pdb
16 import sys
17 import os
18 
19 import struct
20 import re
21 import optparse
22 import time
23 import datetime
24 import itertools
25 from math import pi, sqrt, cos
26 import traceback
27 
28 
29 
30 # Dependencies:
31 # Numpy
32 import numpy as np
33 
34 #### Functions for reading FLEXPART output #####
35 
def gridarea(H):
    """Return an array of grid-cell areas (m**2) for the output grid.

    Usage::

        > area = gridarea(H)

    Returns
        OUT = (nx, ny) array with the area of each output grid cell

    Arguments
        H = :class:`Header` object (or plain dict) from the readheader
            function; must provide 'numxgrid', 'numygrid', 'outlat0',
            'dxout' and 'dyout'.
    """
    pih = pi / 180.
    r_earth = 6.371e6
    cosfunc = lambda y: cos(y * pih) * r_earth

    nx = H['numxgrid']
    ny = H['numygrid']
    outlat0 = H['outlat0']
    dyout = H['dyout']
    dxout = H['dxout']
    area = np.zeros((nx, ny))

    for iy in range(ny):
        # Cell-centre latitude; iy is 0-based, hence the +0.5 offset.
        ylata = outlat0 + (float(iy) + 0.5) * dyout
        ylatp = ylata + 0.5 * dyout
        ylatm = ylata - 0.5 * dyout
        if (ylatm < 0 and ylatp > 0):
            # Cell straddles the equator: flat approximation of the zone.
            hzone = dyout * r_earth * pih
        else:
            # Height of the spherical zone between the bounding latitudes;
            # abs() replaces the original two-branch ordering and covers
            # both hemispheres identically.
            cosfactp = cosfunc(ylatp)
            cosfactm = cosfunc(ylatm)
            hzone = abs(sqrt(r_earth ** 2 - cosfactp ** 2) -
                        sqrt(r_earth ** 2 - cosfactm ** 2))

        # Zone area of one cell of width dxout degrees; identical for
        # every ix at this latitude, so fill the whole column at once.
        cell_area = 2.*pi * r_earth * hzone * dxout / 360.
        area[:, iy] = cell_area

    return area
83 
85  """
86  Open and read the binary file (bf) header only to the point of
87  the Flexpart version string, return the string, seek to the
88  start of the file.
89 
90  """
91  try:
92  bf = BinaryFile(bf)
93  except:
94  bf = bf
95 
96  ret = bf.tell()
97  bf.seek(12) # start of version string
98  version = bf.read('13S')
99  bf.seek(ret)
100 
101  return version
102 
def read_header(pathname, **kwargs):
    """
    The readheader function returns a special class (Structure) which behaves
    like a dictionary. It contains all the metadata from the simulation which
    is contained in the "header" or "header_nest" binary files from the model
    output.

    .. warning::
        It is recommended to use the :class:`Header` class: H = pf.Header(path)

    This version is using the BinaryFile class rather than FortFlex.

    Usage::

        > H = read_header(pathname) #Don't include header filename

    Returns a dictionary

        H = dictionary like object with all the run metadata.

    Arguments

    .. tabularcolumns::  |l|L|

    =============       ========================================
    keyword             Description [default]
    =============       ========================================
    pathname            FLEXPART run output directory
    readp               read release points 0=no, [1]=y
    nested              nested output [False] or True
    headerfile          provide custom name for header file
    datefile            provide a custom name for the date file
    verbose             print information while loading header
    =============       ========================================

    .. note::
        **This function is in development**

        Please report any bugs found.

    .. note::
        The user is no longer required to indicate which version of FLEXPART
        the header is from. Checks are made, and the header is read accordingly.
        Please report any problems...
    """
    # Default options; keyword arguments may override these below.
    OPS = Structure()
    OPS.readp = True
    OPS.nested = False
    OPS.ltopo = 1  # 1 for AGL, 0 for ASL
    OPS.verbose = False
    OPS.headerfile = None
    OPS.datefile = None

    # Warn about keyword options that are not recognised.
    for k in kwargs.keys():
        if k not in OPS.keys():
            print("WARNING: {0} not a valid input option.".format(k))

    # BW compat fixes
    if 'nest' in kwargs.keys():
        raise IOError("nest is no longer a valid keyword, see docs. \n Now use nested=True or nested=False")

    # BUGFIX: the original used "is 1" / "is 0" which test identity, not
    # equality (and only worked by CPython small-int caching); use ==.
    if 'nested' in kwargs.keys():
        if kwargs['nested'] == 1:
            print("WARNING, use of nested=1, deprecated converting to nested=True")
            kwargs['nested'] = True

    if 'nested' in kwargs.keys():
        if kwargs['nested'] == 0:
            print("WARNING, use of nested=0, deprecated converting to nested=False")
            kwargs['nested'] = False

    OPS.update(kwargs)

    if OPS.verbose:
        print("Reading Header with:\n")
        for o in OPS:
            print("%s ==> %s" % (o, OPS[o]))

    # Utility lambdas for reading the binary file; bf is bound further
    # down, before these are first called.
    skip = lambda n = 8 : bf.seek(n, 1)
    getbin = lambda dtype, n = 1 : bf.read(dtype, (n,))

    h = Structure()

    if OPS.headerfile:
        filename = os.path.join(pathname, OPS.headerfile)
    elif OPS.nested is True:
        filename = os.path.join(pathname, 'header_nest')
        h['nested'] = True
    else:
        filename = os.path.join(pathname, 'header')
        h['nested'] = False

    # Open header file in binary format
    if not os.path.exists(filename):
        raise IOError("No such file: {0}".format(filename))
    else:
        try:
            bf = BinaryFile(filename, order="fortran")
        except Exception:
            raise IOError("Error opening: {0} with BinaryFile class".format(filename))

    # Get available_dates from dates file in same directory as header
    if OPS.datefile:
        datefile = os.path.join(pathname, OPS.datefile)
    else:
        datefile = os.path.join(pathname, 'dates')

    if not os.path.exists(datefile):
        raise IOError("No DATEFILE: {0}".format(datefile))
    else:
        try:
            # open() instead of the Python 2-only file(); the context
            # manager also closes the handle (the original leaked it).
            with open(datefile, 'r') as f:
                fd = f.readlines()
        except Exception:
            raise IOError("Could not read datefile: {0}".format(datefile))

    # get rid of any duplicate dates (a fix for the forecast system)
    fd = sorted(list(set(fd)))
    h['available_dates'] = [d.strip('\n') for d in fd]

    # which version format is header file:
    version = _get_header_version(bf)

    # required containers
    junk = []  # for catching unused output
    h['nz_list'] = []
    h['species'] = []
    h['wetdep'] = []
    h['drydep'] = []
    h['ireleasestart'] = []
    h['ireleaseend'] = []
    h['compoint'] = []

    # Define Header format and create Dictionary Keys; the '_N' entries
    # hold Fortran record markers that carry no data.
    I = {0: '_0', 1: 'ibdate', 2: 'ibtime', 3: 'flexpart',
         4: '_1', 5: 'loutstep', 6: 'loutaver', 7: 'loutsample',
         8: '_2', 9: 'outlon0', 10: 'outlat0', 11: 'numxgrid',
         12: 'numygrid', 13: 'dxout', 14: 'dyout', 15: '_3', 16: 'numzgrid',
         }
    # format for binary reading first part of the header file
    Dfmt = ['i', 'i', 'i', '13S', '2i', 'i', 'i', 'i', '2i', 'f', 'f', 'i', 'i', 'f', 'f', '2i', 'i']
    if bf:
        a = [bf.read(fmt) for fmt in Dfmt]
        for j in range(len(a)):
            h[I[j]] = a[j]

        h['outheight'] = np.array([bf.read('f') for _j in range(h['numzgrid'])])
        junk.append(bf.read('2i'))
        h['jjjjmmdd'] = bf.read('i')
        h['hhmmss'] = bf.read('i')
        junk.append(bf.read('2i'))
        # Floor division (// works identically on Py2 ints): the value is
        # stored tripled in the file.  why!?
        h['nspec'] = bf.read('i') // 3
        h['numpointspec'] = bf.read('i')
        junk.append(bf.read('2i'))

        # Read in the species names and levels for each nspec.  The inner
        # comprehensions use _j so they no longer clobber the loop index i.
        for i in range(h['nspec']):
            if 'V6' in version:
                h['wetdep'].append(''.join([bf.read('c') for _j in range(10)]).strip())
                junk.append(bf.read('2i'))
                junk.append(bf.read('i'))
                h['drydep'].append(''.join([bf.read('c') for _j in range(10)]).strip())
                junk.append(bf.read('2i'))
                h['nz_list'].append(getbin('i'))
                h['species'].append(''.join([getbin('c') for _j in range(10)]).strip())
            else:
                junk.append(bf.read('i'))
                h['wetdep'].append(''.join([bf.read('c') for _j in range(10)]).strip())
                junk.append(bf.read('2i'))
                junk.append(bf.read('i'))
                h['drydep'].append(''.join([bf.read('c') for _j in range(10)]).strip())
                junk.append(bf.read('2i'))
                h['nz_list'].append(bf.read('i'))
                h['species'].append(''.join([bf.read('c') for _j in range(10)]).strip())
                junk.append(bf.read('2i'))

        if 'V6' in version:
            bf.seek(8, 1)
            h['numpoint'] = bf.read('i')

        # read release info if requested
        # if OPS.readp is False we cycle through once, then break the
        # loop. This is done in order to get some of the information
        # into the header.
        before_readp = bf.tell()

        # initialise release fields
        I = {2: 'kindz', 3: 'xp1', 4: 'yp1', 5: 'xp2',
             6: 'yp2', 7: 'zpoint1', 8: 'zpoint2', 9: 'npart', 10: 'mpart'}

        for k, v in I.items():  # items() works on both Py2 and Py3
            h[v] = np.zeros(h['numpoint'])  # create zero-filled lists in H dict

        h['xmass'] = np.zeros((h['numpoint'], h['nspec']))

        if 'V6' in version:
            skip()
            # NOTE(review): _readV6 is not defined in this module; it is
            # expected to come from the original pflexible sources.
            _readV6(bf, h)
        else:
            junk.append(bf.read('i'))
            for i in range(h['numpoint']):
                junk.append(bf.read('i'))

                h['ireleasestart'].append(bf.read('i'))
                h['ireleaseend'].append(bf.read('i'))
                h['kindz'][i] = bf.read('h')  # This is an int16, might need to change something
                junk.append(bf.read('2i'))

                h['xp1'][i] = bf.read('f')
                h['yp1'][i] = bf.read('f')
                h['xp2'][i] = bf.read('f')
                h['yp2'][i] = bf.read('f')
                h['zpoint1'][i] = bf.read('f')
                h['zpoint2'][i] = bf.read('f')

                junk.append(bf.read('2i'))
                h['npart'][i] = bf.read('i')
                h['mpart'][i] = bf.read('i')

                junk.append(bf.read('i'))

                # Release-point name: length-prefixed, word chars collected
                # one at a time until a non-word byte is hit.
                l = bf.read('i')  # get compoint length
                gt = bf.tell() + l  # create 'goto' point just past the string
                sp = ''
                while re.search(r"\w", bf.read('c')):
                    bf.seek(-1, 1)
                    sp = sp + bf.read('c')

                bf.seek(gt)  # skip ahead to gt point

                h['compoint'].append(sp)  # release point names for each numpoint

                junk.append(bf.read('i'))
                # now loop for nspec to get xmass
                for v in range(h['nspec']):
                    Dfmt = ['i', 'f', '2i', 'f', '2i', 'f', 'i']
                    a = [bf.read(fmt) for fmt in Dfmt]
                    h['xmass'][i, v] = a[1]

                if OPS.readp is False:
                    # We get the first set of release points here in order
                    # to get some information, but skip reading the rest
                    bf.seek(before_readp)
                    if 'V6' in version:
                        bf.seek(157 * h['numpoint'], 1)
                    else:
                        bf.seek(119 * h['numpoint'] + (h['nspec'] * 36) * h['numpoint'] + 4, 1)
                    break

        if 'V6' in version:
            h['method'] = bf.read('i')
        else:
            junk.append(bf.read('i'))
            junk.append(bf.read('i'))

        h['lsubgrid'] = bf.read('i')
        h['lconvection'] = bf.read('i')
        h['ind_source'] = bf.read('i')
        h['ind_receptor'] = bf.read('i')

        if 'V6' in version:
            pass
        else:
            junk.append(bf.read('2i'))

        h['nageclass'] = bf.read('i')

        Lage_fmt = ['i'] * h.nageclass
        h['lage'] = [bf.read(fmt) for fmt in Lage_fmt]

        # Orography
        nx = h['numxgrid']
        ny = h['numygrid']
        Dfmt = ['f'] * nx
        h['oro'] = np.zeros((nx, ny), float)  # np.float is deprecated; same type
        junk.append(bf.read('2i'))

        for ix in range(nx):
            h['oro'][ix] = bf.read('f', ny)
            bf.seek(8, 1)  # skip the Fortran record markers between rows

        # Why was this? / deprecated.
        # if h['loutstep'] < 0:
        #    h['nspec'] = h['numpoint']

        bf.close()

    ############# ADDITIONS ###########
    # attributes to header that can be added after reading below here
    h['pathname'] = pathname
    h['decayconstant'] = 0
    h.path = pathname

    # Calculate Height (outheight + topography)
    # There is an offset issue here related to the 0-indexing. Be careful.
    oro = h['oro']  # oro is a numpy array
    nx = h['numxgrid']
    ny = h['numygrid']
    nz = h['numzgrid']

    Heightnn = np.zeros((nx, ny, nz), float)
    for ix in range(nx):
        if OPS.ltopo == 1:
            Heightnn[ix, :, 0] = oro[ix, :]
        else:
            Heightnn[ix, :, 0] = np.zeros(ny)

        # NOTE(review): this loop starts at iz=0 and overwrites the
        # level-0 value assigned just above -- TODO confirm intent.
        for iz in range(nz):
            if OPS.ltopo == 1:
                Heightnn[ix, :, iz] = [h['outheight'][iz] + oro[ix, y] for y in range(ny)]
            else:
                Heightnn[ix, :, iz] = h['outheight'][iz]

    h['Area'] = gridarea(h)
    h['Heightnn'] = Heightnn
    h['nx'] = nx
    h['ny'] = ny
    h['nz'] = nz

    # Convert ireleasestart and ireleaseend to datetimes, fix issue #10
    start_day = datetime.datetime.strptime(str(h['ibdate']), '%Y%m%d')
    H, M, S = [int(str(h['ibtime']).zfill(6)[i:i + 2]) for i in range(0, 6, 2)]
    start_time = datetime.timedelta(hours=H, minutes=M, seconds=S)
    h['simulationstart'] = start_day + start_time

    if OPS.readp:
        releasestart, releaseend = [], []
        for i in range(h.numpointspec):
            releasestart.append(h.simulationstart +
                                datetime.timedelta(seconds=int(h.ireleasestart[i])))
            releaseend.append(h.simulationstart +
                              datetime.timedelta(seconds=int(h.ireleaseend[i])))
        h.releasestart = releasestart
        h.releaseend = releaseend[:h.numpointspec]
        # midpoint of each release interval
        h.releasetimes = [b - ((b - a) / 2) for a, b in zip(h.releasestart, h.releaseend)]

    # Add datetime objects for dates
    available_dates_dt = []
    for i in h.available_dates:
        available_dates_dt.append(datetime.datetime(
            int(i[:4]), int(i[4:6]), int(i[6:8]), int(i[8:10]), int(i[10:12]), int(i[12:])))
    h.available_dates_dt = available_dates_dt
    h.first_date = available_dates_dt[0]
    h.last_date = available_dates_dt[-1]
    # NOTE(review): these "ageclasses" are derived from the output dates,
    # not from the nageclass/lage data read above -- TODO confirm.
    h.ageclasses = np.array([act - h.simulationstart for act in h.available_dates_dt])
    h.numageclasses = len(h.ageclasses)

    # Add other helpful attributes
    h.nxmax = h.numxgrid
    h.nymax = h.numygrid
    h.nzmax = h.numzgrid
    h.maxspec = h.nspec
    h.maxpoint = h.numpoint
    h.maxageclass = h.numageclasses

    h.area = h.Area  # fix an annoyance

    if OPS.readp:
        h.xpoint = h.xp1
        h.ypoint = h.yp1

    # Add release unit derived from kindz
    if 'kindz' not in h.keys():
        h.kindz = [0]
    h.alt_unit = 'unkn.'
    if 3 in h.kindz:
        h.alt_unit = 'hPa'
    elif 2 in h.kindz:
        h.alt_unit = 'm.a.s.l.'
    elif 1 in h.kindz:
        h.alt_unit = 'm.a.g.l.'

    # Direction and plotting units follow from the sign of loutstep.
    if h.loutstep > 0:
        h.direction = 'forward'
        h.unit = 'conc'  # could be pptv
        h.plot_unit = 'ppb'
    else:
        h.direction = 'backward'
        h.unit = 'time'
        h.plot_unit = 'ns / kg'  # Not sure about this

    # Units based on Table 1, ACP 2005
    if h.direction == 'forward':
        if h.ind_source == 1:
            if h.ind_receptor == 1:
                h.output_unit = 'ng m-3'
            if h.ind_receptor == 2:
                h.output_unit = 'pptm'
        if h.ind_source == 2:
            if h.ind_receptor == 1:
                h.output_unit = 'ng m-3'
            if h.ind_receptor == 2:
                h.output_unit = 'pptm'
    if h.direction == 'backward':
        if h.ind_source == 1:
            if h.ind_receptor == 1:
                h.output_unit = 's'
            if h.ind_receptor == 2:
                h.output_unit = 's m^3 kg-1'
        if h.ind_source == 2:
            if h.ind_receptor == 1:
                h.output_unit = 's kg m-3'
            if h.ind_receptor == 2:
                h.output_unit = 's'

    # Add layer thickness
    layerthickness = [h.outheight[0]]
    for i, lh in enumerate(h.outheight[1:]):
        layerthickness.append(lh - h.outheight[i])
    h.layerthickness = layerthickness

    h.options = OPS
    h.fp_version = h.flexpart
    h.junk = junk  # the extra bits that were read... more for debugging

    # print('Read {0} Header: {1}'.format(h.fp_version, filename))

    return h
559 
# BW Compatability
# Older code imported version-specific readers; all of them are now
# aliases of the single read_header, which auto-detects the version.
readheader = read_header
readheaderV8 = read_header
readheaderV6 = read_header
564 
565 
566 
567 
568 ########### HELPER FUNCTIONS ##########
class BinaryFile(object):

    """

    BinaryFile: A class for accessing data to/from large binary files


    The data is meant to be read/write sequentially from/to a binary file.
    One can request to read a piece of data with a specific type and shape
    from it. Also, it supports the notion of Fortran and C ordered data,
    so that the returned data is always well-behaved (C-contiguous and
    aligned).

    This class is seeking capable.

    :Author: Francesc Alted
    :Contact: faltet@pytables.org
    :Created: 2010-03-18
    :Acknowledgment: Funding for the development of this code is provided
        through the Norwegian Research Council VAUUAV project #184724, 2010

    NOTE(review): this class uses the Python 2-only ``raise X, "msg"``
    statement form and therefore will not parse under Python 3.
    """


    # Common types whose conversion can be accelerated via the struct
    # module (numpy dtype char code -> struct format char)
    structtypes = {
        'i1': 'b', 'i2': 'h', 'i4': 'i',
        'f4': 'f', 'f8': 'd',
    }

    def __init__(self, filename, mode="r", order="fortran"):
        """Open the `filename` for write/read binary files.

        The `mode` can be 'r', 'w' or 'a' for reading (default),
        writing or appending. The file will be created if it doesn't
        exist when opened for writing or appending; it will be
        truncated when opened for writing. Add a '+' to the mode to
        allow simultaneous reading and writing.

        `order` specifies whether the file is is written in 'fortran'
        or 'c' order.
        """
        self.mode = mode + "b"
        # NOTE(review): buffering=1 means line buffering, which has no
        # real meaning for a binary stream -- presumably "buffered" was
        # intended; confirm before changing.
        self.file = open(filename, mode=self.mode, buffering=1)
        """The file handler."""
        if order not in ['fortran', 'c']:
            raise ValueError, "order should be either 'fortran' or 'c'."
        self.order = order
        """The order for file ('c' or 'fortran')."""


    def read(self, dtype, shape=(1,)):
        """Read an array of `dtype` and `shape` from current position.

        `shape` must be any tuple made of integers or even () for scalars.

        The current position will be updated to point to the end of
        read data.
        """
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)
        if type(shape) is int:
            shape = (shape,)
        if type(shape) is not tuple:
            raise ValueError, "shape must be a tuple"
        # Total byte length = item size times the number of items requested.
        length = dtype.itemsize
        rank = len(shape)
        if rank == 1:
            length *= shape[0]
        elif rank > 1:
            length *= np.array(shape).prod()

        # Correct the shape in case dtype is multi-dimensional
        if shape != (1,):
            shape = shape + dtype.shape
        else:
            shape = dtype.shape
        rank = len(shape)

        # Single items are always returned in C order.
        if shape in (1, (1,)):
            order = "c"
        else:
            order = self.order

        # Read the data from file
        data = self.file.read(length)
        if len(data) < length:
            raise EOFError, "Asking for more data than available in file."

        # Convert read string into a regular array, or scalar
        dts = dtype.base.str[1:]
        if rank == 0:
            # NOTE(review): dts looks like e.g. 'S13' here, so dts[1]
            # would be '1', not 'S' -- possibly dts[0] was intended;
            # confirm against the numpy dtype.str format.
            if dts[1] == "S":
                data = str(data)
            elif dts in self.structtypes:
                # struct.unpack is faster than ndarray for known scalars.
                data = struct.unpack(self.structtypes[dts], data)[0]
        else:
            data = np.ndarray(shape=shape, buffer=data, dtype=dtype.base)
            if rank == 0:
                # Retrieve the scalar out of the 0-dim array
                data = data[()]

        if rank > 1:
            # If original data file is in fortran mode, reverse the
            # shape first
            if order == "fortran":
                shape = [i for i in shape[::-1]]
            data = data.reshape(shape)
            # If original data file is in fortran mode, do a transpose.
            # As the shape was reversed previously, we get the original
            # shape again.
            if self.order == "fortran":
                data = data.transpose().copy()
            # Do an additional copy just in case the array is not
            # well-behaved (i.e., it is not aligned or not contiguous).
            elif not data.flags.behaved:
                data = data.copy()
        return data


    def write(self, arr):
        """Write an `arr` to current position.

        The current position will be updated to point to the end of
        written data.
        """
        # Transpose data if case we need to
        if (self.order == "fortran") != (arr.flags.fortran):
            arr = arr.transpose().copy()
        # Write the data to file
        self.file.write(arr.data)


    def seek(self, offset, whence=0):
        """Move to new file position.

        Argument offset is a byte count. Optional argument whence
        defaults to 0 (offset from start of file, offset should be >=
        0); other values are 1 (move relative to current position,
        positive or negative), and 2 (move relative to end of file,
        usually negative, although many platforms allow seeking beyond
        the end of a file). If the file is opened in text mode, only
        offsets returned by tell() are legal. Use of other offsets
        causes undefined behavior.
        """
        self.file.seek(offset, whence)


    def tell(self):
        "Returns current file position, an integer (may be a long integer)."
        return self.file.tell()


    def flush(self):
        "Flush buffers to file."
        self.file.flush()


    def close(self):
        "End access to file."
        self.file.close()
731 
732 
def closest(num, numlist):
    """Return the index of the value in `numlist` closest to `num`.

    If `num` is a datetime, `numlist` must also contain datetimes and
    both are converted with date2num before comparison.

    NOTE(review): date2num is not imported in this module (presumably it
    comes from matplotlib.dates in the original pflexible), so the
    datetime branch raises NameError here -- confirm the import.
    """
    if isinstance(num, datetime.datetime):
        assert isinstance(numlist[0], datetime.datetime), \
            "num is date, numlist must be a list of dates"
        num = date2num(num)
        numlist = date2num(numlist)

    # np.asarray allows plain Python lists as well as ndarrays.
    return (np.abs(np.asarray(numlist) - num)).argmin()
746 
747 
748 
class Structure(dict, object):
    """ A 'fancy' dictionary that provides 'MatLab' structure-like
    referencing (attribute access maps to item access).

    .. warning::
        may be replaced with a pure dict in future release.

    """
    def __getattr__(self, attr):
        # Fake a __getstate__ method that returns None so that pickling
        # and copying do not fall through to the item lookup below.
        if attr == "__getstate__":
            return lambda: None
        # NOTE: a missing attribute raises KeyError (not AttributeError),
        # preserving the original behaviour.
        return self[attr]

    def __setattr__(self, attr, value):
        self[attr] = value

    def __dir__(self):
        """ necessary for Ipython tab-completion """
        # The original defined __dir__ twice with the same body; the
        # duplicate has been removed.  list() keeps dir() happy on Py3.
        return list(self.keys())

    def set_with_dict(self, D):
        """ set attributes with a dict """
        for k in D.keys():
            self.__setattr__(k, D[k])
777 
class Header(Structure):
    """This is the primary starting point for processing FLEXPART output.
    The Header class ( :class:`Structure` ) behaves like a dictionary.
    It contains all the metadata from the simulation run as read from the
    "header" or "header_nest" binary files from the model output.

    This version is using the BinaryFile class rather than FortFlex.

    Usage::

        > H = pf.Header(inputpath)
        > H.keys() #provides a list of keys available

    Returns a dictionary

        H = dictionary like object with all the run metadata. TODO: Fill in keys.


    Arguments

    .. tabularcolumns::  |l|L|

    ==============      ========================================
    keyword             Description [default]
    ==============      ========================================
    path                path to the run directory
    headerfile          name of the header file if non standard
    readheader_ops      optional dictionary to pass readheader
    ==============      ========================================

    Arguments for readheader_ops

    .. tabularcolumns::  |l|L|

    =============       ========================================
    keyword             Description [default]
    =============       ========================================
    pathname            FLEXPART run output directory
    readp               read release points 0=no, [1]=y
    nested              nested output True or [False]
    version             version of FLEXPART, default = 'V8'
    =============       ========================================


    .. note::
        **This function is in development**

        This function is being developed so that there is no dependence on
        using f2py to compile the FortFlex module. It is working using the
        :class:`BinaryFile`, but is notably slower than :class:`FortFlex`.
        Please report any bugs found.


    """


    def __init__(self, path=None, headerfile=None, version='V8', **readheader_ops):
        """Read the header from `path` and populate this dictionary.

        Raises IOError if the header cannot be read.
        """
        try:
            h = read_header(path, **readheader_ops)
            self.set_with_dict(h)
            self.lonlat()
            # BUGFIX: the version parameter was previously ignored and
            # 'V8' hard-coded; honour the caller's value (default 'V8').
            self.version = version
        except Exception:
            traceback.print_exc()
            raise IOError('''
            Could not set header variables.
            Does the `header` file exist in path?\n{0}'''.format(path))


    def lonlat(self):
        """ Add longitude and latitude attributes using data from header """
        # NOTE(review): float-step np.arange can occasionally yield one
        # extra point due to rounding -- TODO confirm grid sizes.
        lons = np.arange(self.outlon0, self.outlon0 + (self.dxout * self.numxgrid), self.dxout)
        lats = np.arange(self.outlat0, self.outlat0 + (self.dyout * self.numygrid), self.dyout)
        self.longitude = lons
        self.latitude = lats

    def read_grid(self, **kwargs):
        """ see :func:`read_grid` (defined in the full pflexible module) """
        self.FD = read_grid(self, **kwargs)

    def fill_backward(self, **kwargs):
        """ see :func:`fill_backward` (defined in the full pflexible module) """
        fill_grids(self, add_attributes=True, **kwargs)

    def add_trajectory(self, **kwargs):
        """ see :func:`read_trajectories` (defined in the full pflexible module) """
        self.trajectory = read_trajectories(self)

    def add_fires(self, **kwargs):
        """ uses the :mod:`emissions` module to read the MODIS hotspot data and
        add it to the header class as a 'fires' attribute.

        **This function is only available within NILU.**

        """

        from nilu.pflexpart import emissions as em
        self.fires = None
        for day in self.available_dates_dt:
            firedata = em.MODIS_hotspot(day)
            daily = firedata.daily
            if daily is None:
                continue
            else:
                # BUGFIX: was "== None"; identity test is the correct
                # (and safe) check against None.
                if self.fires is None:
                    self.fires = daily
                else:
                    self.fires = np.hstack((self.fires, daily)).view(np.recarray)

    def closest_dates(self, dateval, fmt=None, take_set=False):
        """ given an iterable of datetimes, finds the closest dates.
        if passed a list, assumes it is a list of datetimes

        if take_set=True, then a set of unique values will be returned.
        This can be used with H.read_grid as the value for time_ret to
        return only the grids matching the array of times.
        See (e.g. `extract_curtain`).
        """
        try:
            vals = [closest(d, self['available_dates_dt']) for d in dateval]
            if take_set:
                return list(set(vals))
            else:
                return vals
        # BUGFIX: a non-iterable dateval raises TypeError, which the
        # original IOError-only clause never caught.
        except (IOError, TypeError):
            print('If dateval is iterable, must contain datetimes')


    def closest_date(self, dateval, fmt=None):
        """ given a datestring or datetime, tries to find the closest date.
        if passed a list, assumes it is a list of datetimes

        """
        if isinstance(dateval, str):
            if not fmt:
                # BUGFIX: the original used two independent "if" tests, so
                # a valid 8-character datestring still hit the else-raise;
                # elif restores the intended dispatch.
                if len(dateval) == 8:
                    fmt = '%Y%m%d'
                elif len(dateval) == 14:
                    fmt = '%Y%m%d%H%M%S'
                else:
                    raise IOError("no format provided for datestring")
                print("Assuming date format: {0}".format(fmt))
            dateval = datetime.datetime.strptime(dateval, fmt)

        return closest(dateval, self['available_dates_dt'])
935 
HELPER FUNCTIONS ##########.
Definition: read_header.py:569
def gridarea
Functions for reading FLEXPART output #####.
Definition: read_header.py:36