1 "table definitions"
2 import os
3 import sys
4 import csv
5 import codecs
6 import unicodedata
7 import weakref
8 from array import array
9 from decimal import Decimal
10 from dbf import _io as io
11 from dbf.dates import Date, DateTime, Time
12 from dbf.exceptions import Bof, Eof, DbfError, DataOverflow, FieldMissing, NonUnicode
13
14 input_decoding = 'cp850'
15 default_codepage = 'cp1252'
16 return_ascii = True
17
18 version_map = {
19 '\x02' : 'FoxBASE',
20 '\x03' : 'dBase III Plus',
21 '\x04' : 'dBase IV',
22 '\x05' : 'dBase V',
23 '\x30' : 'Visual FoxPro',
24 '\x31' : 'Visual FoxPro (auto increment field)',
25 '\x43' : 'dBase IV SQL',
26 '\x7b' : 'dBase IV w/memos',
27 '\x83' : 'dBase III Plus w/memos',
28 '\x8b' : 'dBase IV w/memos',
29 '\x8e' : 'dBase IV w/SQL table' }
30
31 code_pages = {
32 '\x01' : ('cp437', 'U.S. MS-DOS'),
33 '\x02' : ('cp850', 'International MS-DOS'),
34 '\x03' : ('cp1252', 'Windows ANSI'),
35 '\x04' : ('mac_roman', 'Standard Macintosh'),
36
37 '\x08' : ('cp865', 'Danish OEM'),
38 '\x09' : ('cp437', 'Dutch OEM'),
39 '\x0A' : ('cp850', 'Dutch OEM (secondary)'),
40 '\x0B' : ('cp437', 'Finnish OEM'),
41 '\x0D' : ('cp437', 'French OEM'),
42 '\x0E' : ('cp850', 'French OEM (secondary)'),
43 '\x0F' : ('cp437', 'German OEM'),
44 '\x10' : ('cp850', 'German OEM (secondary)'),
45 '\x11' : ('cp437', 'Italian OEM'),
46 '\x12' : ('cp850', 'Italian OEM (secondary)'),
47 '\x13' : ('cp932', 'Japanese Shift-JIS'),
48 '\x14' : ('cp850', 'Spanish OEM (secondary)'),
49 '\x15' : ('cp437', 'Swedish OEM'),
50 '\x16' : ('cp850', 'Swedish OEM (secondary)'),
51 '\x17' : ('cp865', 'Norwegian OEM'),
52 '\x18' : ('cp437', 'Spanish OEM'),
53 '\x19' : ('cp437', 'English OEM (Britain)'),
54 '\x1A' : ('cp850', 'English OEM (Britain) (secondary)'),
55 '\x1B' : ('cp437', 'English OEM (U.S.)'),
56 '\x1C' : ('cp863', 'French OEM (Canada)'),
57 '\x1D' : ('cp850', 'French OEM (secondary)'),
58 '\x1F' : ('cp852', 'Czech OEM'),
59 '\x22' : ('cp852', 'Hungarian OEM'),
60 '\x23' : ('cp852', 'Polish OEM'),
61 '\x24' : ('cp860', 'Portuguese OEM'),
62 '\x25' : ('cp850', 'Portuguese OEM (secondary)'),
63 '\x26' : ('cp866', 'Russian OEM'),
64 '\x37' : ('cp850', 'English OEM (U.S.) (secondary)'),
65 '\x40' : ('cp852', 'Romanian OEM'),
66 '\x4D' : ('cp936', 'Chinese GBK (PRC)'),
67 '\x4E' : ('cp949', 'Korean (ANSI/OEM)'),
68 '\x4F' : ('cp950', 'Chinese Big 5 (Taiwan)'),
69 '\x50' : ('cp874', 'Thai (ANSI/OEM)'),
70 '\x57' : ('cp1252', 'ANSI'),
71 '\x58' : ('cp1252', 'Western European ANSI'),
72 '\x59' : ('cp1252', 'Spanish ANSI'),
73
74 '\x64' : ('cp852', 'Eastern European MS-DOS'),
75 '\x65' : ('cp866', 'Russian MS-DOS'),
76 '\x66' : ('cp865', 'Nordic MS-DOS'),
77 '\x67' : ('cp861', 'Icelandic MS-DOS'),
78
79 '\x68' : (None, 'Kamenicky (Czech) MS-DOS'),
80 '\x69' : (None, 'Mazovia (Polish) MS-DOS'),
81
82 '\x6a' : ('cp737', 'Greek MS-DOS (437G)'),
83 '\x6b' : ('cp857', 'Turkish MS-DOS'),
84 '\x78' : ('cp950', 'Traditional Chinese (Hong Kong SAR, Taiwan) Windows'),
85 '\x79' : ('cp949', 'Korean Windows'),
86 '\x7a' : ('cp936', 'Chinese Simplified (PRC, Singapore) Windows'),
87 '\x7b' : ('cp932', 'Japanese Windows'),
88 '\x7c' : ('cp874', 'Thai Windows'),
89 '\x7d' : ('cp1255', 'Hebrew Windows'),
90 '\x7e' : ('cp1256', 'Arabic Windows'),
91 '\xc8' : ('cp1250', 'Eastern European Windows'),
92 '\xc9' : ('cp1251', 'Russian Windows'),
93 '\xca' : ('cp1254', 'Turkish Windows'),
94 '\xcb' : ('cp1253', 'Greek Windows'),
95 '\x96' : ('mac_cyrillic', 'Russian Macintosh'),
96 '\x97' : ('mac_latin2', 'Macintosh EE'),
97 '\x98' : ('mac_greek', 'Greek Macintosh') }
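# The code_pages mapping ties the language-driver byte (stored at offset 29 of
# the table header) to a Python codec name plus a description.  A minimal
# lookup sketch -- illustrative only, the module's real logic lives in
# _codepage_lookup() and DbfTable.codepage() below:
def _example_codec_for(ldid):
    codec, description = code_pages[ldid]
    if codec is None:
        raise DbfError("Unsupported codepage: %s" % description)
    return codecs.getdecoder(codec), codecs.getencoder(codec)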
98
100 """Provides routines to extract and save data within the fields of a dbf record."""
101 __slots__ = ['_recnum', '_layout', '_data', '__weakref__']
103 """calls appropriate routine to fetch value stored in field from array
104 @param record_data: the data portion of the record
105 @type record_data: array of characters
106 @param fielddef: description of the field definition
107 @type fielddef: dictionary with keys 'type', 'start', 'length', 'end', 'decimals', and 'flags'
108 @returns: python data stored in field"""
109
110 field_type = fielddef['type']
111 retrieve = yo._layout.fieldtypes[field_type]['Retrieve']
112 datum = retrieve(record_data, fielddef, yo._layout.memo)
113 if field_type in yo._layout.character_fields:
114 datum = yo._layout.decoder(datum)[0]
115 if yo._layout.return_ascii:
116 try:
117 datum = yo._layout.output_encoder(datum)[0]
118 except UnicodeEncodeError:
119 datum = unicodedata.normalize('NFD', datum).encode('ascii','ignore')
120 return datum
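# For reference, the fielddef dictionary described above has this shape; the
# values are illustrative (a 25-character field starting right after the
# one-byte delete flag), not taken from a real table:
_example_fielddef = {
    'type': 'C', 'start': 1, 'length': 25, 'end': 26, 'decimals': 0, 'flags': 0}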
122 "calls appropriate routine to convert value to ascii bytes, and save it in record"
123 field_type = fielddef['type']
124 update = yo._layout.fieldtypes[field_type]['Update']
125 if field_type in yo._layout.character_fields:
126 if not isinstance(value, unicode):
127 if yo._layout.input_decoder is None:
128 raise NonUnicode("String not in unicode format, no default encoding specified")
129 value = yo._layout.input_decoder(value)[0]
130 value = yo._layout.encoder(value)[0]
131 bytes = array('c', update(value, fielddef, yo._layout.memo))
132 size = fielddef['length']
133 if len(bytes) > size:
134 raise DataOverflow("tried to store %d bytes in %d byte field" % (len(bytes), size))
135 blank = array('c', ' ' * size)
136 start = fielddef['start']
137 end = start + size
138 blank[:len(bytes)] = bytes[:]
139 yo._data[start:end] = blank[:]
140 yo._updateDisk(yo._recnum * yo._layout.header.recordlength() + yo._layout.header.start(), yo._data.tostring())
152 results = []
153 if not specs:
154 specs = yo._layout.index
155 specs = _normalize_tuples(tuples=specs, length=2, filler=[_nop])
156 for field, func in specs:
157 results.append(func(yo[field]))
158 return tuple(results)
159
165 if name[0:2] == '__' and name[-2:] == '__':
166 raise AttributeError, 'Method %s is not implemented.' % name
167 elif not name in yo._layout.fields:
168 raise FieldMissing(name)
169 try:
170 fielddef = yo._layout[name]
171 value = yo._retrieveFieldValue(yo._data[fielddef['start']:fielddef['end']], fielddef)
172 return value
173 except DbfError, error:
174 error.message = "field --%s-- is %s -> %s" % (name, yo._layout.fieldtypes[fielddef['type']]['Type'], error.message)
175 raise
192 - def __new__(cls, recnum, layout, kamikaze='', _fromdisk=False):
238 result = []
239 for field in yo.field_names():
240 result.append("%-10s: %s" % (field, yo[field]))
241 return '\n'.join(result)
243 return yo._data.tostring()
245 "creates a blank record data chunk"
246 layout = yo._layout
247 ondisk = layout.ondisk
248 layout.ondisk = False
249 yo._data = array('c', ' ' * layout.header.recordlength())
250 layout.memofields = []
251 for field in layout.fields:
252 yo._updateFieldValue(layout[field], layout.fieldtypes[layout[field]['type']]['Blank']())
253 if layout[field]['type'] in layout.memotypes:
254 layout.memofields.append(field)
255 layout.blankrecord = yo._data[:]
256 layout.ondisk = ondisk
258 "physical record number"
259 return yo._recnum
261 "marked for deletion?"
262 return yo._data[0] == '*'
271 "saves a dictionary into a records fields\nkeys with no matching field will raise a FieldMissing exception unless drop = True"
272 for key in dict:
273 if not key in yo.field_names():
274 if drop:
275 continue
276 raise FieldMissing(key)
277 yo.__setattr__(key, dict[key])
292 "returns a dictionary of fieldnames and values which can be used with gather_fields(). if blank is True, values are empty."
293 keys = yo._layout.fields
294 if blank:
295 values = [yo._layout.fieldtypes[yo._layout[key]['type']]['Blank']() for key in keys]
296 else:
297 values = [yo[field] for field in keys]
298 return dict(zip(keys, values))
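# A round-trip sketch for scatter_fields()/gather_fields(); the field names
# 'name' and 'age' are hypothetical:
def _example_scatter_gather(record):
    snapshot = record.scatter_fields()                        # {'name': ..., 'age': ...}
    snapshot['age'] = 42
    record.gather_fields(snapshot)                            # unknown keys raise FieldMissing
    record.gather_fields({'age': 43, 'extra': 0}, drop=True)  # 'extra' silently skipped
    return snapshot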
304 """Provides access to memo fields as dictionaries
305 must override _init, _get_memo, and _put_memo to
306 store memo contents to disk"""
308 "initialize disk file usage"
310 "retrieve memo contents from disk"
312 "store memo contents to disk"
314 ""
315 yo.meta = meta
316 yo.memory = {}
317 yo.nextmemo = 1
318 yo._init()
319 yo.meta.newmemofile = False
321 "gets the memo in block"
322 if yo.meta.ignorememos or not block:
323 return ''
324 if yo.meta.ondisk:
325 return yo._get_memo(block)
326 else:
327 return yo.memory[block]
329 "stores data in memo file, returns block number"
330 if yo.meta.ignorememos or data == '':
331 return 0
332 if yo.meta.inmemory:
333 thismemo = yo.nextmemo
334 yo.nextmemo += 1
335 yo.memory[thismemo] = data
336 else:
337 thismemo = yo._put_memo(data)
338 return thismemo
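# A skeleton sketch of the three hooks a concrete memo class overrides
# (_init, _get_memo, _put_memo); it only shows the shape of the interface,
# the working on-disk versions are _Db3Memo and _VfpMemo below:
class _ExampleMemo(_DbfMemo):
    def _init(yo):
        yo._blocks = {}                 # pretend storage instead of a real .dbt/.fpt file
    def _get_memo(yo, block):
        return yo._blocks[block]
    def _put_memo(yo, data):
        block = yo.nextmemo
        yo.nextmemo += 1
        yo._blocks[block] = data
        return block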
341 "dBase III specific"
342 yo.meta.memo_size = 512
343 yo.record_header_length = 2
344 if yo.meta.ondisk and not yo.meta.ignorememos:
345 if yo.meta.newmemofile:
346 yo.meta.mfd = open(yo.meta.memoname, 'w+b')
347 yo.meta.mfd.write(io.packLongInt(1) + '\x00' * 508)
348 else:
349 try:
350 yo.meta.mfd = open(yo.meta.memoname, 'r+b')
351 yo.meta.mfd.seek(0)
352 yo.nextmemo = io.unpackLongInt(yo.meta.mfd.read(4))
353 except:
354 raise DbfError("memo file appears to be corrupt")
356 block = int(block)
357 yo.meta.mfd.seek(block * yo.meta.memo_size)
358 eom = -1
359 data = ''
360 while eom == -1:
361 newdata = yo.meta.mfd.read(yo.meta.memo_size)
362 if not newdata:
363 return data
364 data += newdata
365 eom = data.find('\x1a\x1a')
366 return data[:eom].rstrip()
368 length = len(data) + yo.record_header_length
369 blocks = length // yo.meta.memo_size
370 if length % yo.meta.memo_size:
371 blocks += 1
372 thismemo = yo.nextmemo
373 yo.nextmemo = thismemo + blocks
374 yo.meta.mfd.seek(0)
375 yo.meta.mfd.write(io.packLongInt(yo.nextmemo))
376 yo.meta.mfd.seek(thismemo * yo.meta.memo_size)
377 yo.meta.mfd.write(data)
378 yo.meta.mfd.write('\x1a\x1a')
379 if len(yo._get_memo(thismemo)) != len(data):
380 raise DbfError("unknown error: memo not saved")
381 return thismemo
384 "Visual Foxpro 6 specific"
385 if yo.meta.ondisk and not yo.meta.ignorememos:
386 yo.record_header_length = 8
387 if yo.meta.newmemofile:
388 if yo.meta.memo_size == 0:
389 yo.meta.memo_size = 1
390 elif 1 < yo.meta.memo_size < 33:
391 yo.meta.memo_size *= 512
392 yo.meta.mfd = open(yo.meta.memoname, 'w+b')
393 nextmemo = 512 // yo.meta.memo_size
394 if nextmemo * yo.meta.memo_size < 512:
395 nextmemo += 1
396 yo.nextmemo = nextmemo
397 yo.meta.mfd.write(io.packLongInt(nextmemo, bigendian=True) + '\x00\x00' + \
398 io.packShortInt(yo.meta.memo_size, bigendian=True) + '\x00' * 504)
399 else:
400 try:
401 yo.meta.mfd = open(yo.meta.memoname, 'r+b')
402 yo.meta.mfd.seek(0)
403 header = yo.meta.mfd.read(512)
404 yo.nextmemo = io.unpackLongInt(header[:4], bigendian=True)
405 yo.meta.memo_size = io.unpackShortInt(header[6:8], bigendian=True)
406 except:
407 raise DbfError("memo file appears to be corrupt")
409 yo.meta.mfd.seek(block * yo.meta.memo_size)
410 header = yo.meta.mfd.read(8)
411 length = io.unpackLongInt(header[4:], bigendian=True)
412 return yo.meta.mfd.read(length)
414 yo.meta.mfd.seek(0)
415 thismemo = io.unpackLongInt(yo.meta.mfd.read(4), bigendian=True)
416 yo.meta.mfd.seek(0)
417 length = len(data) + yo.record_header_length
418 blocks = length // yo.meta.memo_size
419 if length % yo.meta.memo_size:
420 blocks += 1
421 yo.meta.mfd.write(io.packLongInt(thismemo+blocks, bigendian=True))
422 yo.meta.mfd.seek(thismemo*yo.meta.memo_size)
423 yo.meta.mfd.write('\x00\x00\x00\x01' + io.packLongInt(len(data), bigendian=True) + data)
424 return thismemo
426 """Provides a framework for dbf style tables."""
427 _version = 'basic memory table'
428 _versionabbv = 'dbf'
429 _fieldtypes = {
430 'D' : { 'Type':'Date', 'Init':io.addDate, 'Blank':Date.today, 'Retrieve':io.retrieveDate, 'Update':io.updateDate, },
431 'L' : { 'Type':'Logical', 'Init':io.addLogical, 'Blank':bool, 'Retrieve':io.retrieveLogical, 'Update':io.updateLogical, },
432 'M' : { 'Type':'Memo', 'Init':io.addMemo, 'Blank':str, 'Retrieve':io.retrieveMemo, 'Update':io.updateMemo, } }
433 _memoext = ''
434 _memotypes = ('M',)
435 _memoClass = _DbfMemo
436 _yesMemoMask = ''
437 _noMemoMask = ''
438 _fixed_fields = ('M','D','L')
439 _variable_fields = tuple()
440 _character_fields = ('M',)
441 _decimal_fields = tuple()
442 _numeric_fields = tuple()
443 _dbfTableHeader = array('c', '\x00' * 32)
444 _dbfTableHeader[0] = '\x00'
445 _dbfTableHeader[8:10] = array('c', io.packShortInt(33))
446 _dbfTableHeader[10] = '\x01'
447 _dbfTableHeader[29] = '\x00'
448 _dbfTableHeader = _dbfTableHeader.tostring()
449 _dbfTableHeaderExtra = ''
450 _supported_tables = []
451 _read_only = False
452 _meta_only = False
453 _use_deleted = True
471 if len(data) != 32:
472 raise DbfError('table header should be 32 bytes, but is %d bytes' % len(data))
473 yo._data = array('c', data + '\x0d')
475 "get/set code page of table"
476 if cp is None:
477 return yo._data[29]
478 else:
479 if cp not in code_pages:
480 for code_page in sorted(code_pages.keys()):
481 sd, ld = code_pages[code_page]
482 if cp == sd or cp == ld:
483 if sd is None:
484 raise DbfError("Unsupported codepage: %s" % ld)
485 cp = code_page
486 break
487 else:
488 raise DbfError("Unsupported codepage: %s" % cp)
489 yo._data[29] = cp
490 return cp
492 "get/set entire structure"
493 if bytes is None:
494 date = io.packDate(Date.today())
495 yo._data[1:4] = array('c', date)
496 return yo._data.tostring()
497 else:
498 if len(bytes) < 32:
499 raise DbfError("length for data of %d is less than 32" % len(bytes))
500 yo._data[:] = array('c', bytes)
502 "get/set any extra dbf info (located after headers, before data records)"
503 fieldblock = yo._data[32:]
504 for i in range(len(fieldblock)//32+1):
505 cr = i * 32
506 if fieldblock[cr] == '\x0d':
507 break
508 else:
509 raise DbfError("corrupt field structure")
510 cr += 33
511 if data is None:
512 return yo._data[cr:].tostring()
513 else:
514 yo._data[cr:] = array('c', data)
515 yo._data[8:10] = array('c', io.packShortInt(len(yo._data)))
517 "number of fields (read-only)"
518 fieldblock = yo._data[32:]
519 for i in range(len(fieldblock)//32+1):
520 cr = i * 32
521 if fieldblock[cr] == '\x0d':
522 break
523 else:
524 raise DbfError("corrupt field structure")
525 return len(fieldblock[:cr]) // 32
527 "get/set field block structure"
528 fieldblock = yo._data[32:]
529 for i in range(len(fieldblock)//32+1):
530 cr = i * 32
531 if fieldblock[cr] == '\x0d':
532 break
533 else:
534 raise DbfError("corrupt field structure")
535 if block is None:
536 return fieldblock[:cr].tostring()
537 else:
538 cr += 32
539 fieldlen = len(block)
540 if fieldlen % 32 != 0:
541 raise DbfError("fields structure corrupt: %d is not a multiple of 32" % fieldlen)
542 yo._data[32:cr] = array('c', block)
543 yo._data[8:10] = array('c', io.packShortInt(len(yo._data)))
544 fieldlen = fieldlen // 32
545 recordlen = 1
546 for i in range(fieldlen):
547 recordlen += ord(block[i*32+16])
548 yo._data[10:12] = array('c', io.packShortInt(recordlen))
550 "get/set number of records (maximum 16,777,215)"
551 if count is None:
552 return io.unpackLongInt(yo._data[4:8].tostring())
553 else:
554 yo._data[4:8] = array('c', io.packLongInt(count))
556 "length of a record (read_only) (max of 65,535)"
557 return io.unpackShortInt(yo._data[10:12].tostring())
559 "starting position of first record in file (must be within first 64K)"
560 if pos is None:
561 return io.unpackShortInt(yo._data[8:10].tostring())
562 else:
563 yo._data[8:10] = array('c', io.packShortInt(pos))
565 "date of last table modification (read-only)"
566 return io.unpackDate(yo._data[1:4].tostring())
568 "dbf version"
569 if ver is None:
570 return yo._data[0]
571 else:
572 yo._data[0] = ver
574 "implements the weakref table for records"
576 yo._meta = meta
577 yo._weakref_list = [weakref.ref(lambda x: None)] * count
579 maybe = yo._weakref_list[index]()
580 if maybe is None:
581 if index < 0:
582 index += yo._meta.header.recordcount()
583 size = yo._meta.header.recordlength()
584 location = index * size + yo._meta.header.start()
585 yo._meta.dfd.seek(location)
586 bytes = yo._meta.dfd.read(size)
587 maybe = _DbfRecord(recnum=index, layout=yo._meta, kamikaze=bytes, _fromdisk=True)
588 yo._weakref_list[index] = weakref.ref(maybe)
589 return maybe
591 yo._weakref_list.append(weakref.ref(record))
593 "returns records using current index"
595 yo._table = table
596 yo._index = -1
597 yo._more_records = True
601 while yo._more_records:
602 yo._index += 1
603 if yo._index >= len(yo._table):
604 yo._more_records = False
605 continue
606 record = yo._table[yo._index]
607 if not yo._table.use_deleted() and record.has_been_deleted():
608 continue
609 return record
610 else:
611 raise StopIteration
613 "constructs fieldblock for disk table"
614 fieldblock = array('c', '')
615 memo = False
616 yo._meta.header.version(chr(ord(yo._meta.header.version()) & ord(yo._noMemoMask)))
617 for field in yo._meta.fields:
618 if yo._meta.fields.count(field) > 1:
619 raise DbfError("corrupted field structure (noticed in _buildHeaderFields)")
620 fielddef = array('c', '\x00' * 32)
621 fielddef[:11] = array('c', io.packStr(field))
622 fielddef[11] = yo._meta[field]['type']
623 fielddef[12:16] = array('c', io.packLongInt(yo._meta[field]['start']))
624 fielddef[16] = chr(yo._meta[field]['length'])
625 fielddef[17] = chr(yo._meta[field]['decimals'])
626 fielddef[18] = chr(yo._meta[field]['flags'])
627 fieldblock.extend(fielddef)
628 if yo._meta[field]['type'] in yo._meta.memotypes:
629 memo = True
630 yo._meta.header.fields(fieldblock.tostring())
631 if memo:
632 yo._meta.header.version(chr(ord(yo._meta.header.version()) | ord(yo._yesMemoMask)))
633 if yo._meta.memo is None:
634 yo._meta.memo = yo._memoClass(yo._meta)
636 "dBase III specific"
637 if yo._meta.header.version() == '\x83':
638 try:
639 yo._meta.memo = yo._memoClass(yo._meta)
640 except:
641 yo._meta.dfd.close()
642 yo._meta.dfd = None
643 raise
644 if not yo._meta.ignorememos:
645 for field in yo._meta.fields:
646 if yo._meta[field]['type'] in yo._memotypes:
647 if yo._meta.header.version() != '\x83':
648 yo._meta.dfd.close()
649 yo._meta.dfd = None
650 raise DbfError("Table structure corrupt: memo fields exist, header declares no memos")
651 elif not os.path.exists(yo._meta.memoname):
652 yo._meta.dfd.close()
653 yo._meta.dfd = None
654 raise DbfError("Table structure corrupt: memo fields exist without memo file")
655 break
657 "builds the FieldList of names, types, and descriptions from the disk file"
658 offset = 1
659 fieldsdef = yo._meta.header.fields()
660 if len(fieldsdef) % 32 != 0:
661 raise DbfError("field definition block corrupt: %d bytes in size" % len(fieldsdef))
662 if len(fieldsdef) // 32 != yo.field_count():
663 raise DbfError("Header shows %d fields, but field definition block has %d fields" % (yo.field_count(), len(fieldsdef)//32))
664 for i in range(yo.field_count()):
665 fieldblock = fieldsdef[i*32:(i+1)*32]
666 name = io.unpackStr(fieldblock[:11])
667 type = fieldblock[11]
668 if not type in yo._meta.fieldtypes:
669 raise DbfError("Unknown field type: %s" % type)
670 start = offset
671 length = ord(fieldblock[16])
672 offset += length
673 end = start + length
674 decimals = ord(fieldblock[17])
675 flags = ord(fieldblock[18])
676 yo._meta.fields.append(name)
677 yo._meta[name] = {'type':type,'start':start,'length':length,'end':end,'decimals':decimals,'flags':flags}
679 "Returns field information Name Type(Length[,Decimals])"
680 name = yo._meta.fields[i]
681 type = yo._meta[name]['type']
682 length = yo._meta[name]['length']
683 decimals = yo._meta[name]['decimals']
684 if type in yo._decimal_fields:
685 description = "%s %s(%d,%d)" % (name, type, length, decimals)
686 elif type in yo._fixed_fields:
687 description = "%s %s" % (name, type)
688 else:
689 description = "%s %s(%d)" % (name, type, length)
690 return description
692 "loads the records from disk to memory"
693 if yo._meta_only:
694 raise DbfError("%s has been closed, records are unavailable" % yo.filename())
695 dfd = yo._meta.dfd
696 header = yo._meta.header
697 dfd.seek(header.start())
698 allrecords = dfd.read()
699 dfd.seek(0)
700 length = header.recordlength()
701 for i in range(header.recordcount()):
702 record_data = allrecords[length*i:length*i+length]
703 yo._table.append(_DbfRecord(i, yo._meta, allrecords[length*i:length*i+length], _fromdisk=True))
704 yo._index.append(i)
705 dfd.seek(0)
707 if specs is None:
708 specs = yo.field_names()
709 elif isinstance(specs, str):
710 specs = specs.split(sep)
711 else:
712 specs = list(specs)
713 specs = [s.strip() for s in specs]
714 return specs
716 "synchronizes the disk file with current data"
717 if yo._meta.inmemory:
718 return
719 fd = yo._meta.dfd
720 fd.seek(0)
721 fd.write(yo._meta.header.data())
722 if not headeronly:
723 for record in yo._table:
724 record._updateDisk()
725 fd.flush()
726 fd.truncate(yo._meta.header.start() + yo._meta.header.recordcount() * yo._meta.header.recordlength())
734 if name in ('_index','_table'):
735 if yo._meta.ondisk:
736 yo._table = yo._Table(len(yo), yo._meta)
737 yo._index = range(len(yo))
738 else:
739 yo._table = []
740 yo._index = []
741 yo._loadtable()
742 return object.__getattribute__(yo, name)
744 if type(value) == int:
745 if not -yo._meta.header.recordcount() <= value < yo._meta.header.recordcount():
746 raise IndexError("Record %d is not in table." % value)
747 return yo._table[yo._index[value]]
748 elif type(value) == slice:
749 sequence = []
750 for index in yo._index[value]:
751 record = yo._table[index]
752 if yo.use_deleted() is True or not record.has_been_deleted():
753 sequence.append(record)
754 return DbfList(yo, sequence, desc='%s --> %s' % (yo.filename(), value))
755 else:
756 raise TypeError('type <%s> not valid for indexing' % type(value))
757 - def __init__(yo, filename=':memory:', field_specs=None, memo_size=128, ignore_memos=False,
758 read_only=False, keep_memos=False, meta_only=False, codepage=None):
759 """open/create dbf file
760 filename should include path if needed
761 field_specs can be either a ;-delimited string or a list of strings
762 memo_size is always 512 for db3 memos
763 ignore_memos is useful if the memo file is missing or corrupt
764 read_only will load records into memory, then close the disk file
765 keep_memos will also load any memo fields into memory
766 meta_only will ignore all records, keeping only basic table information
767 codepage will override whatever is set in the table itself"""
768 if filename == ':memory:':
769 if field_specs is None:
770 raise DbfError("field list must be specified for in-memory tables")
771 elif type(yo) is DbfTable:
772 raise DbfError("only memory tables supported")
773 yo._meta = meta = yo._MetaData()
774 meta.filename = filename
775 meta.fields = []
776 meta.fieldtypes = yo._fieldtypes
777 meta.fixed_fields = yo._fixed_fields
778 meta.variable_fields = yo._variable_fields
779 meta.character_fields = yo._character_fields
780 meta.decimal_fields = yo._decimal_fields
781 meta.numeric_fields = yo._numeric_fields
782 meta.memotypes = yo._memotypes
783 meta.ignorememos = ignore_memos
784 meta.memo_size = memo_size
785 meta.input_decoder = codecs.getdecoder(input_decoding)
786 meta.output_encoder = codecs.getencoder(input_decoding)
787 meta.return_ascii = return_ascii
788 meta.header = header = yo._TableHeader(yo._dbfTableHeader)
789 header.extra(yo._dbfTableHeaderExtra)
790 header.data()
791 yo.codepage(codepage or default_codepage)
792 if filename == ':memory:':
793 yo._index = []
794 yo._table = []
795 meta.ondisk = False
796 meta.inmemory = True
797 meta.memoname = ':memory:'
798 else:
799 base, ext = os.path.splitext(filename)
800 if ext == '':
801 meta.filename = base + '.dbf'
802 meta.memoname = base + yo._memoext
803 meta.ondisk = True
804 meta.inmemory = False
805 if field_specs:
806 if meta.ondisk:
807 meta.dfd = open(meta.filename, 'w+b')
808 meta.newmemofile = True
809 yo.add_fields(field_specs)
810 return
811 dfd = meta.dfd = open(meta.filename, 'r+b')
812 dfd.seek(0)
813 meta.header = header = yo._TableHeader(dfd.read(32))
814 if not header.version() in yo._supported_tables:
815 dfd.close()
816 dfd = None
817 raise TypeError("Unsupported dbf type: %s [%x]" % (version_map.get(meta.header.version, 'Unknown: %s' % meta.header.version), ord(meta.header.version)))
818 yo.codepage(meta.header.codepage())
819 fieldblock = dfd.read(header.start() - 32)
820 for i in range(len(fieldblock)//32+1):
821 fieldend = i * 32
822 if fieldblock[fieldend] == '\x0d':
823 break
824 else:
825 raise DbfError("corrupt field structure in header")
826 if len(fieldblock[:fieldend]) % 32 != 0:
827 raise DbfError("corrupt field structure in header")
828 header.fields(fieldblock[:fieldend])
829 header.extra(fieldblock[fieldend+1:])
830 yo._initializeFields()
831 yo._checkMemoIntegrity()
832 meta.current = -1
833 dfd.seek(0)
834 if meta_only:
835 yo.close(keep_table=False, keep_memos=False)
836 elif read_only:
837 yo.close(keep_table=True, keep_memos=keep_memos)
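# Usage sketch for the open/create logic above, using the Db3Table subclass
# defined later in this module (the file and field names are hypothetical):
def _example_open_or_create():
    new = Db3Table('example.dbf', 'name C(25); age N(3,0)')   # field_specs => create
    new.close()
    existing = Db3Table('example.dbf')                         # no field_specs => open
    existing.close(keep_table=True)
    return existing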
845 if yo._read_only:
846 return __name__ + ".Table('%s', read_only=True)" % yo._meta.filename
847 elif yo._meta_only:
848 return __name__ + ".Table('%s', meta_only=True)" % yo._meta.filename
849 else:
850 return __name__ + ".Table('%s')" % yo._meta.filename
852 if yo._read_only:
853 status = "read-only"
854 elif yo._meta_only:
855 status = "meta-only"
856 else:
857 status = "read/write"
858 str = """
859 Table: %s
860 Type: %s
861 Codepage: %s
862 Status: %s
863 Last updated: %s
864 Record count: %d
865 Field count: %d
866 Record length: %d
867 """ % (yo.filename(), version_map.get(yo._meta.header.version(), 'unknown - ' + hex(ord(yo._meta.header.version()))),
868 yo.codepage(), status, yo.last_update(), len(yo), yo.field_count(), yo.record_length())
869 str += "\n --Fields--\n"
870 for i in range(len(yo._meta.fields)):
871 str += " " + yo._fieldLayout(i) + "\n"
872 return str
874 "the number of fields in the table"
875 return yo._meta.header.fieldcount()
877 "a list of the fields in the table"
878 return yo._meta.fields[:]
880 "table's file name, including path (if specified on open)"
881 return yo._meta.filename
883 "date of last update"
884 return yo._meta.header.update()
886 "table's memo name (if path included in filename on open)"
887 return yo._meta.memoname
889 "number of bytes in a record"
890 return yo._meta.header.recordlength()
892 "index number of the current record"
893 return yo._meta.current
903 "returns the dbf type of the table"
904 return yo._version
906 """adds field(s) to the table layout; format is Name Type(Length,Decimals)[; Name Type(Length,Decimals)[...]]
907 backup table is created with _backup appended to name
908 then modifies current structure"""
909 all_records = [record for record in yo]
910 if yo:
911 yo.create_backup()
912 yo._meta.blankrecord = None
913 meta = yo._meta
914 offset = meta.header.recordlength()
915 fields = yo._list_fields(field_specs, sep=';')
916 for field in fields:
917 try:
918 name, format = field.split()
919 if name[0] == '_' or name[0].isdigit() or not name.replace('_','').isalnum():
920 raise DbfError("Field names cannot start with _ or digits, and can only contain the _, letters, and digits")
921 name = name.lower()
922 if name in meta.fields:
923 raise DbfError("Field '%s' already exists" % name)
924 field_type = format[0].upper()
925 if len(name) > 10:
926 raise DbfError("Maximum field name length is 10. '%s' is %d characters long." % (name, len(name)))
927 if not field_type in meta.fieldtypes.keys():
928 raise DbfError("Unknown field type: %s" % field_type)
929 length, decimals = yo._meta.fieldtypes[field_type]['Init'](format)
930 except ValueError:
931 raise DbfError("invalid field specifier: %s" % field)
932 start = offset
933 end = offset + length
934 offset = end
935 meta.fields.append(name)
936 meta[name] = {'type':field_type, 'start':start, 'length':length, 'end':end, 'decimals':decimals, 'flags':0}
937 if meta[name]['type'] in yo._memotypes and meta.memo is None:
938 meta.memo = yo._memoClass(meta)
939 for record in yo:
940 record[name] = meta.fieldtypes[field_type]['Blank']()
941 yo._buildHeaderFields()
942 yo._updateDisk()
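# Spec-format sketch for add_fields(): each spec is "Name Type(Length[,Decimals])"
# and several specs are joined with semicolons (field names are hypothetical):
def _example_add_fields(table):
    table.add_fields('nickname C(15); balance N(10,2); active L')
    return table.field_names()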
943 - def append(yo, kamikaze='', drop=False, multiple=1):
944 "adds <multiple> blank records, and fills fields with dict/tuple values if present"
945 if not yo.field_count():
946 raise DbfError("No fields defined, cannot append")
947 dictdata = False
948 tupledata = False
949 if not isinstance(kamikaze, _DbfRecord):
950 if isinstance(kamikaze, dict):
951 dictdata = kamikaze
952 kamikaze = ''
953 elif isinstance(kamikaze, tuple):
954 tupledata = kamikaze
955 kamikaze = ''
956 newrecord = _DbfRecord(recnum=yo._meta.header.recordcount(), layout=yo._meta, kamikaze=kamikaze)
957 yo._table.append(newrecord)
958 yo._index.append(yo._meta.header.recordcount())
959 yo._meta.header.recordcount(yo._meta.header.recordcount() + 1)
960 if dictdata:
961 newrecord.gather_fields(dictdata, drop)
962 elif tupledata:
963 for index, item in enumerate(tupledata):
964 newrecord[index] = item
965 elif kamikaze == '':
966 for field in yo._meta.memofields:
967 newrecord[field] = ''
968 elif kamikaze:
969 for field in yo._meta.memofields:
970 newrecord[field] = kamikaze[field]
971 multiple -= 1
972 if multiple:
973 data = newrecord._data
974 single = yo._meta.header.recordcount()
975 total = single + multiple
976 while single < total:
977 multi_record = _DbfRecord(single, yo._meta, kamikaze=data)
978 yo._table.append(multi_record)
979 yo._index.append(single)
980 for field in yo._meta.memofields:
981 multi_record[field] = newrecord[field]
982 single += 1
983 yo._meta.header.recordcount(total)
984 yo._meta.current = yo._meta.header.recordcount() - 1
985 newrecord = multi_record
986 yo._updateDisk(headeronly=True)
987 return newrecord
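# append() accepts nothing (a blank record), a dict, a tuple, or another
# record; a short sketch with hypothetical field names:
def _example_append(table):
    table.append()                               # one blank record
    table.append({'name': 'Ada', 'age': 36})     # dict keys matched to field names
    table.append(('Grace', 45))                  # tuple values filled in field order
    return table.append(multiple=10)             # ten blank records at once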
989 "moves record pointer to previous usable record; returns True if no more usable records"
990 while yo._meta.current > 0:
991 yo._meta.current -= 1
992 if yo.use_deleted() or not yo.current().has_been_deleted():
993 break
994 else:
995 yo._meta.current = -1
996 return True
997 return False
998 - def bottom(yo, get_record=False):
999 """sets record pointer to bottom of table
1000 if get_record, seeks to and returns last (non-deleted) record
1001 DbfError if table is empty
1002 Eof if all records deleted and use_deleted() is False"""
1003 yo._meta.current = yo._meta.header.recordcount()
1004 if get_record:
1005 try:
1006 return yo.prev()
1007 except Bof:
1008 yo._meta.current = yo._meta.header.recordcount()
1009 raise Eof()
1010 - def close(yo, keep_table=False, keep_memos=False):
1011 """closes disk files
1012 ensures table data is available if keep_table
1013 ensures memo data is available if keep_memos"""
1014 if keep_table:
1015 yo._table
1016 else:
1017 if '_index' in dir(yo):
1018 del yo._table
1019 del yo._index
1020 yo._meta.inmemory = True
1021 if yo._meta.ondisk:
1022 yo._meta.dfd.close()
1023 yo._meta.dfd = None
1024 if '_index' in dir(yo):
1025 yo._read_only = True
1026 else:
1027 yo._meta_only = True
1028 if yo._meta.mfd is not None:
1029 if not keep_memos:
1030 yo._meta.ignorememos = True
1031 else:
1032 memo_fields = []
1033 for field in yo.field_names():
1034 if yo.is_memotype(field):
1035 memo_fields.append(field)
1036 for record in yo:
1037 for field in memo_fields:
1038 record[field] = record[field]
1039 yo._meta.mfd.close()
1040 yo._meta.mfd = None
1041 yo._meta.ondisk = False
1042 - def codepage(yo, cp=None):
1043 result = yo._meta.header.codepage(cp)
1044 if cp is None:
1045 return "%s (%s)" % code_pages[result]
1046 else:
1047 yo._meta.decoder = codecs.getdecoder(code_pages[result][0])
1048 yo._meta.encoder = codecs.getencoder(code_pages[result][0])
1050 "creates a backup table -- ignored if memory table"
1051 if new_name is None:
1052 new_name = os.path.splitext(yo.filename())[0] + '_backup'
1053 if yo.filename().startswith(':memory:'):
1054 return
1055 fields = yo._list_fields(fields)
1056 bkup_field_specs = yo.structure(fields)
1057 bkup_table = yo.__class__(filename=new_name, field_specs=bkup_field_specs)
1058 for record in yo:
1059 bkup_table.append(record.scatter_fields(), drop=True)
1060 bkup_table.close()
1062 "returns current logical record, or its index"
1063 if yo._meta.current < 0:
1064 raise Bof()
1065 elif yo._meta.current >= yo._meta.header.recordcount():
1066 raise Eof()
1067 if index:
1068 return yo._meta.current
1069 return yo._table[yo._index[yo._meta.current]]
1071 """removes field(s) from the table
1072 creates backup files with _backup appended to the file name,
1073 then modifies current structure"""
1074 doomed = yo._list_fields(doomed)
1075 for victim in doomed:
1076 if victim not in yo._meta.fields:
1077 raise DbfError("field %s not in table -- delete aborted" % victim)
1078 all_records = [record for record in yo]
1079 yo.create_backup()
1080 for victim in doomed:
1081 yo._meta.fields.pop(yo._meta.fields.index(victim))
1082 start = yo._meta[victim]['start']
1083 end = yo._meta[victim]['end']
1084 for record in yo:
1085 record._data = record._data[:start] + record._data[end:]
1086 for field in yo._meta.fields:
1087 if yo._meta[field]['start'] == end:
1088 end = yo._meta[field]['end']
1089 yo._meta[field]['start'] = start
1090 yo._meta[field]['end'] = start + yo._meta[field]['length']
1091 start = yo._meta[field]['end']
1092 yo._buildHeaderFields()
1093 yo._updateDisk()
1104 - def export(yo, records=None, filename=None, field_specs=None, format='csv', header=True):
1105 """writes the table using CSV or tab-delimited format, using the filename
1106 given if specified, otherwise the table name"""
1107 if filename is None:
1108 filename = yo.filename()
1109 field_specs = yo._list_fields(field_specs)
1110 if records is None:
1111 records = yo
1112 format = format.lower()
1113 if format not in ('csv', 'tab'):
1114 raise DbfError("export format: csv or tab, not %s" % format)
1115 base, ext = os.path.splitext(filename)
1116 if ext.lower() in ('', '.dbf'):
1117 filename = base + "." + format
1118 fd = open(filename, 'wb')
1119 try:
1120 if format == 'csv':
1121 csvfile = csv.writer(fd, dialect='dbf')
1122 if header:
1123 csvfile.writerow(field_specs)
1124 for record in records:
1125 fields = []
1126 for fieldname in field_specs:
1127 fields.append(record[fieldname])
1128 csvfile.writerow(fields)
1129 else:
1130 if header:
1131 fd.write('\t'.join(field_specs) + '\n')
1132 for record in records:
1133 fields = []
1134 for fieldname in field_specs:
1135 fields.append(str(record[fieldname]))
1136 fd.write('\t'.join(fields) + '\n')
1137 finally:
1138 fd.close()
1139 fd = None
1140 return len(records)
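# export() sketch: write selected fields as CSV or tab-delimited text (the
# output filename and field names are hypothetical):
def _example_export(table):
    table.export(filename='people.csv', field_specs=['name', 'age'], format='csv')
    return table.export(format='tab', header=False)   # falls back to the table's own name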
1142 "returns record at physical_index[recno]"
1143 return yo._table[recno]
1144 - def goto(yo, criteria):
1145 """changes the record pointer to the first matching (non-deleted) record
1146 criteria should be either a tuple of tuple(value, field, func) triples,
1147 or an integer to go to"""
1148 if isinstance(criteria, int):
1149 if not -yo._meta.header.recordcount() <= criteria < yo._meta.header.recordcount():
1150 raise IndexError("Record %d does not exist" % criteria)
1151 if criteria < 0:
1152 criteria += yo._meta.header.recordcount()
1153 yo._meta.current = criteria
1154 return yo.current()
1155 criteria = _normalize_tuples(tuples=criteria, length=3, filler=[_nop])
1156 specs = tuple([(field, func) for value, field, func in criteria])
1157 match = tuple([value for value, field, func in criteria])
1158 current = yo.current(index=True)
1159 matchlen = len(match)
1160 while not yo.Eof():
1161 record = yo.current()
1162 results = record(*specs)
1163 if results == match:
1164 return record
1165 return yo.goto(current)
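# goto() sketch: jump either to a record number or to the first record
# matching (value, field[, func]) criteria (values and fields hypothetical):
def _example_goto(table):
    table.goto(0)                                        # by record number
    return table.goto((('Ada', 'name'), (36, 'age')))    # by field criteria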
1166 - def index(yo, sort=None, reverse=False):
1187 "returns True if name is a memo type field"
1188 return yo._meta[name]['type'] in yo._memotypes
1189 - def new(yo, filename, _field_specs=None):
1190 "returns a new table of the same type"
1191 if _field_specs is None:
1192 _field_specs = yo.structure()
1193 if filename != ':memory:':
1194 path, name = os.path.split(filename)
1195 if path == "":
1196 filename = os.path.join(os.path.split(yo.filename())[0], filename)
1197 elif name == "":
1198 filename = os.path.join(path, os.path.split(yo.filename())[1])
1199 return yo.__class__(filename, _field_specs)
1201 "set record pointer to next (non-deleted) record, and return it"
1202 if yo.eof():
1203 raise Eof()
1204 return yo.current()
1205 - def pack(yo, _pack=True):
1206 "physically removes all deleted records"
1207 newtable = []
1208 newindex = []
1209 i = 0
1210 for record in yo._table:
1211 if record.has_been_deleted() and _pack:
1212 record._recnum = -1
1213 else:
1214 record._recnum = i
1215 newtable.append(record)
1216 newindex.append(i)
1217 i += 1
1218 yo._table = newtable
1219 yo._index = newindex
1220 yo._meta.header.recordcount(i)
1221 yo._current = -1
1222 yo._meta.index = ''
1223 yo._updateDisk()
1225 "set record pointer to previous (non-deleted) record, and return it"
1226 if yo.bof():
1227 raise Bof
1228 return yo.current()
1229 - def query(yo, sql=None, python=None):
1230 "uses exec to perform python queries on the table"
1231 if python is None:
1232 raise DbfError("query: python parameter must be specified")
1233 possible = DbfList(desc="%s --> %s" % (yo.filename(), python))
1234 query_result = {}
1235 select = 'query_result["keep"] = %s' % python
1236 g = {}
1237 for record in yo:
1238 query_result['keep'] = False
1239 g['query_result'] = query_result
1240 exec select in g, record
1241 if query_result['keep']:
1242 possible.append(yo, record)
1243 return possible
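# query() sketch: the python expression is evaluated once per record, with the
# record supplying the field names (field names hypothetical):
def _example_query(table):
    return table.query(python="age > 21 and name != ''")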
1245 "renames an existing field"
1246 if not oldname in yo._meta.fields:
1247 raise DbfError("field --%s-- does not exist -- cannot rename it." % oldname)
1248 if newname[0] == '_' or newname[0].isdigit() or not newname.replace('_','').isalnum():
1249 raise DbfError("field names cannot start with _ or digits, and can only contain the _, letters, and digits")
1250 newname = newname.lower()
1251 if newname in yo._meta.fields:
1252 raise DbfError("field --%s-- already exists" % newname)
1253 if len(newname) > 10:
1254 raise DbfError("maximum field name length is 10. '%s' is %d characters long." % (newname, len(newname)))
1255 yo._meta[newname] = yo._meta[oldname]
1256 yo._meta.fields[yo._meta.fields.index(oldname)] = newname
1257 yo._buildHeaderFields()
1258 yo._updateDisk(headeronly=True)
1259 - def search(yo, match, fuzzy=None, indices=False):
1260 """searches using a binary algorythm
1261 looking for records that match the criteria in match, which is a tuple
1262 with a data item per ordered field. table must be sorted. if index,
1263 returns a list of records' indices from the current sort order.
1264 """
1265 if yo._meta.index is None:
1266 raise DbfError('table must be indexed to use Search')
1267 matchlen = len(match)
1268 if fuzzy:
1269 matchlen -= 1
1270 fuzzy_match = match[-1]
1271 fuzzy_field = yo._meta.index[matchlen][0]
1272 match = match[:-1]
1273 records = DbfList(desc="%s --> search: index=%s, match=%s, fuzzy=%s(%s))" % (yo.filename(), yo.index(), match, fuzzy.__name__, fuzzy_match))
1274 else:
1275 records = DbfList(desc="%s --> search: index=%s, match=%s)" % (yo.filename(), yo.index(), match))
1276 if indices:
1277 records = []
1278 if not isinstance(match, tuple):
1279 match = tuple(match)
1280 segment = len(yo)
1281 current = 0
1282 toosoon = True
1283 notFound = True
1284 while notFound:
1285 segment = segment // 2
1286 if toosoon:
1287 current += segment
1288 else:
1289 current -= segment
1290 if current % 2:
1291 segment += 1
1292 if current == len(yo) or segment == 0:
1293 break
1294 value = yo._meta.orderresults[yo[current].record_number()][:matchlen]
1295 if value < match:
1296 toosoon = True
1297 elif value > match:
1298 toosoon = False
1299 else:
1300 notFound = False
1301 break
1302 if current == 0:
1303 break
1304 if notFound:
1305 return records
1306 while current > 0:
1307 current -= 1
1308 value = yo._meta.orderresults[yo[current].record_number()][:matchlen]
1309 if value != match:
1310 current += 1
1311 break
1312 while True:
1313 value = yo._meta.orderresults[yo[current].record_number()][:matchlen]
1314 if value != match:
1315 break
1316 if yo.use_deleted() or not yo[current].has_been_deleted():
1317 if indices:
1318 records.append(current)
1319 else:
1320 records.append(yo, yo[current])
1321 current += 1
1322 if current == len(yo):
1323 break
1324 if fuzzy:
1325 if indices:
1326 records = [rec for rec in records if fuzzy(yo[rec][fuzzy_field]) == fuzzy_match]
1327 else:
1328 records[:] = [rec for rec in records if fuzzy(rec[fuzzy_field]) == fuzzy_match]
1329 return records
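# search() sketch: the table must already be sorted with index() (not shown
# here); match supplies one value per ordered field, and indices=True returns
# positions in the current sort order instead of records (values hypothetical):
def _example_search(table):
    records = table.search(match=('Ada',))
    positions = table.search(match=('Ada', 36), indices=True)
    return records, positions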
1330 - def size(yo, field):
1331 "returns size of field as a tuple of (length, decimals)"
1332 if field in yo:
1333 return (yo._meta[field]['length'], yo._meta[field]['decimals'])
1334 raise DbfError("%s is not a field in %s" % (field, yo.filename()))
1336 """return list of fields suitable for creating same table layout
1337 @param fields: list of fields or None for all fields"""
1338 field_specs = []
1339 fields = yo._list_fields(fields)
1340 try:
1341 for name in fields:
1342 field_specs.append(yo._fieldLayout(yo.field_names().index(name)))
1343 except ValueError:
1344 raise DbfError("field --%s-- does not exist" % name)
1345 return field_specs
1346 - def top(yo, get_record=False):
1347 """sets record pointer to top of table; if get_record, seeks to and returns first (non-deleted) record
1348 DbfError if table is empty
1349 Bof if all records are deleted and use_deleted() is False"""
1350 yo._meta.current = -1
1351 if get_record:
1352 try:
1353 return yo.next()
1354 except Eof:
1355 yo._meta.current = -1
1356 raise Bof()
1357 - def type(yo, field):
1358 "returns type of field"
1359 if field in yo:
1360 return yo._meta[field]['type']
1361 raise DbfError("%s is not a field in %s" % (field, yo.filename()))
1362 - def zap(yo, areyousure=False):
1363 """removes all records from table -- this cannot be undone!
1364 areyousure must be True, else error is raised"""
1365 if areyousure:
1366 yo._table = []
1367 yo._index = []
1368 yo._meta.header.recordcount(0)
1369 yo._current = -1
1370 yo._meta.index = ''
1371 yo._updateDisk()
1372 else:
1373 raise DbfError("You must say you are sure to wipe the table")
1374
1376 """Provides an interface for working with dBase III tables."""
1377 _version = 'dBase III Plus'
1378 _versionabbv = 'db3'
1379 _fieldtypes = {
1380 'C' : {'Type':'Character', 'Retrieve':io.retrieveCharacter, 'Update':io.updateCharacter, 'Blank':str, 'Init':io.addCharacter},
1381 'D' : {'Type':'Date', 'Retrieve':io.retrieveDate, 'Update':io.updateDate, 'Blank':Date.today, 'Init':io.addDate},
1382 'L' : {'Type':'Logical', 'Retrieve':io.retrieveLogical, 'Update':io.updateLogical, 'Blank':bool, 'Init':io.addLogical},
1383 'M' : {'Type':'Memo', 'Retrieve':io.retrieveMemo, 'Update':io.updateMemo, 'Blank':str, 'Init':io.addMemo},
1384 'N' : {'Type':'Numeric', 'Retrieve':io.retrieveNumeric, 'Update':io.updateNumeric, 'Blank':int, 'Init':io.addNumeric} }
1385 _memoext = '.dbt'
1386 _memotypes = ('M',)
1387 _memoClass = _Db3Memo
1388 _yesMemoMask = '\x80'
1389 _noMemoMask = '\x7f'
1390 _fixed_fields = ('D','L','M')
1391 _variable_fields = ('C','N')
1392 _character_fields = ('C','M')
1393 _decimal_fields = ('N',)
1394 _numeric_fields = ('N',)
1395 _dbfTableHeader = array('c', '\x00' * 32)
1396 _dbfTableHeader[0] = '\x03'
1397 _dbfTableHeader[8:10] = array('c', io.packShortInt(33))
1398 _dbfTableHeader[10] = '\x01'
1399 _dbfTableHeader[29] = '\x03'
1400 _dbfTableHeader = _dbfTableHeader.tostring()
1401 _dbfTableHeaderExtra = ''
1402 _supported_tables = ['\x03', '\x83']
1403 _read_only = False
1404 _meta_only = False
1405 _use_deleted = True
1407 "dBase III specific"
1408 if yo._meta.header.version() == '\x83':
1409 try:
1410 yo._meta.memo = yo._memoClass(yo._meta)
1411 except:
1412 yo._meta.dfd.close()
1413 yo._meta.dfd = None
1414 raise
1415 if not yo._meta.ignorememos:
1416 for field in yo._meta.fields:
1417 if yo._meta[field]['type'] in yo._memotypes:
1418 if yo._meta.header.version() != '\x83':
1419 yo._meta.dfd.close()
1420 yo._meta.dfd = None
1421 raise DbfError("Table structure corrupt: memo fields exist, header declares no memos")
1422 elif not os.path.exists(yo._meta.memoname):
1423 yo._meta.dfd.close()
1424 yo._meta.dfd = None
1425 raise DbfError("Table structure corrupt: memo fields exist without memo file")
1426 break
1428 "builds the FieldList of names, types, and descriptions"
1429 offset = 1
1430 fieldsdef = yo._meta.header.fields()
1431 if len(fieldsdef) % 32 != 0:
1432 raise DbfError("field definition block corrupt: %d bytes in size" % len(fieldsdef))
1433 if len(fieldsdef) // 32 != yo.field_count():
1434 raise DbfError("Header shows %d fields, but field definition block has %d fields" % (yo.field_count(), len(fieldsdef)//32))
1435 for i in range(yo.field_count()):
1436 fieldblock = fieldsdef[i*32:(i+1)*32]
1437 name = io.unpackStr(fieldblock[:11])
1438 type = fieldblock[11]
1439 if not type in yo._meta.fieldtypes:
1440 raise DbfError("Unknown field type: %s" % type)
1441 start = offset
1442 length = ord(fieldblock[16])
1443 offset += length
1444 end = start + length
1445 decimals = ord(fieldblock[17])
1446 flags = ord(fieldblock[18])
1447 yo._meta.fields.append(name)
1448 yo._meta[name] = {'type':type,'start':start,'length':length,'end':end,'decimals':decimals,'flags':flags}
1450 version = 'Provides an interface for working with Visual FoxPro 6 tables'
1451 _versionabbv = 'vfp'
1452 _fieldtypes = {
1453 'C' : {'Type':'Character', 'Retrieve':io.retrieveCharacter, 'Update':io.updateCharacter, 'Blank':str, 'Init':io.addCharacter},
1454 'Y' : {'Type':'Currency', 'Retrieve':io.retrieveCurrency, 'Update':io.updateCurrency, 'Blank':Decimal(), 'Init':io.addVfpCurrency},
1455 'B' : {'Type':'Double', 'Retrieve':io.retrieveDouble, 'Update':io.updateDouble, 'Blank':float, 'Init':io.addVfpDouble},
1456 'F' : {'Type':'Float', 'Retrieve':io.retrieveNumeric, 'Update':io.updateNumeric, 'Blank':float, 'Init':io.addVfpNumeric},
1457 'N' : {'Type':'Numeric', 'Retrieve':io.retrieveNumeric, 'Update':io.updateNumeric, 'Blank':int, 'Init':io.addVfpNumeric},
1458 'I' : {'Type':'Integer', 'Retrieve':io.retrieveInteger, 'Update':io.updateInteger, 'Blank':int, 'Init':io.addVfpInteger},
1459 'L' : {'Type':'Logical', 'Retrieve':io.retrieveLogical, 'Update':io.updateLogical, 'Blank':bool, 'Init':io.addLogical},
1460 'D' : {'Type':'Date', 'Retrieve':io.retrieveDate, 'Update':io.updateDate, 'Blank':Date.today, 'Init':io.addDate},
1461 'T' : {'Type':'DateTime', 'Retrieve':io.retrieveVfpDateTime, 'Update':io.updateVfpDateTime, 'Blank':DateTime.now, 'Init':io.addVfpDateTime},
1462 'M' : {'Type':'Memo', 'Retrieve':io.retrieveVfpMemo, 'Update':io.updateVfpMemo, 'Blank':str, 'Init':io.addVfpMemo},
1463 'G' : {'Type':'General', 'Retrieve':io.retrieveVfpMemo, 'Update':io.updateVfpMemo, 'Blank':str, 'Init':io.addVfpMemo},
1464 'P' : {'Type':'Picture', 'Retrieve':io.retrieveVfpMemo, 'Update':io.updateVfpMemo, 'Blank':str, 'Init':io.addVfpMemo},
1465 '0' : {'Type':'_NullFlags', 'Retrieve':io.unsupportedType, 'Update':io.unsupportedType, 'Blank':int, 'Init':None} }
1466 _memoext = '.fpt'
1467 _memotypes = ('G','M','P')
1468 _memoClass = _VfpMemo
1469 _yesMemoMask = '\x30'
1470 _noMemoMask = '\x30'
1471 _fixed_fields = ('B','D','G','I','L','M','P','T','Y')
1472 _variable_fields = ('C','F','N')
1473 _character_fields = ('C','M')
1474 _decimal_fields = ('F','N')
1475 _numeric_fields = ('B','F','I','N','Y')
1476 _supported_tables = ('\x30',)
1477 _dbfTableHeader = array('c', '\x00' * 32)
1478 _dbfTableHeader[0] = '\x30'
1479 _dbfTableHeader[8:10] = array('c', io.packShortInt(33+263))
1480 _dbfTableHeader[10] = '\x01'
1481 _dbfTableHeader[29] = '\x03'
1482 _dbfTableHeader = _dbfTableHeader.tostring()
1483 _dbfTableHeaderExtra = '\x00' * 263
1484 _use_deleted = True
1486 if os.path.exists(yo._meta.memoname):
1487 try:
1488 yo._meta.memo = yo._memoClass(yo._meta)
1489 except:
1490 yo._meta.dfd.close()
1491 yo._meta.dfd = None
1492 raise
1493 if not yo._meta.ignorememos:
1494 for field in yo._meta.fields:
1495 if yo._meta[field]['type'] in yo._memotypes:
1496 if not os.path.exists(yo._meta.memoname):
1497 yo._meta.dfd.close()
1498 yo._meta.dfd = None
1499 raise DbfError("Table structure corrupt: memo fields exist without memo file")
1500 break
1502 "builds the FieldList of names, types, and descriptions"
1503 offset = 1
1504 fieldsdef = yo._meta.header.fields()
1505 for i in range(yo.field_count()):
1506 fieldblock = fieldsdef[i*32:(i+1)*32]
1507 name = io.unpackStr(fieldblock[:11])
1508 type = fieldblock[11]
1509 if not type in yo._meta.fieldtypes:
1510 raise DbfError("Unknown field type: %s" % type)
1511 elif type == '0':
1512 return
1513 start = io.unpackLongInt(fieldblock[12:16])
1514 length = ord(fieldblock[16])
1515 offset += length
1516 end = start + length
1517 decimals = ord(fieldblock[17])
1518 flags = ord(fieldblock[18])
1519 yo._meta.fields.append(name)
1520 yo._meta[name] = {'type':type,'start':start,'length':length,'end':end,'decimals':decimals,'flags':flags}
1522 "list of Dbf records, with set-like behavior"
1523 _desc = ''
1524 - def __init__(yo, table=None, new_records=None, desc=None):
1525 yo._list = []
1526 yo._set = set()
1527 yo._current = -1
1528 if isinstance(new_records, DbfList):
1529 yo._list = new_records._list
1530 for item in yo._list:
1531 yo._set.add(item)
1532 yo._current = 0
1533 elif new_records is not None:
1534 for record in new_records:
1535 item = (table, record.record_number())
1536 if item not in yo._set:
1537 yo._set.add(item)
1538 yo._list.append(item)
1539 yo._current = 0
1540 if desc is not None:
1541 yo._desc = desc
1543 if isinstance(other, DbfList):
1544 result = DbfList()
1545 result._set = set(yo._set)
1546 result._list[:] = yo._list[:]
1547 for item in other._list:
1548 if item not in result._set:
1549 result._set.add(item)
1550 result._list.append(item)
1551 result._current = 0 if result else -1
1552 return result
1553 return NotImplemented
1555 if isinstance(key, int):
1556 loc = yo._current - len(yo._list) + 1
1557 item = yo._list.pop(key)
1558 yo._set.remove(item)
1559 if loc > 0:
1560 yo._current = len(yo._list)
1561 elif loc == 0 or yo._current >= len(yo._list):
1562 yo._current = len(yo._list) - 1
1563 elif isinstance(key, slice):
1564 loc = yo._current - len(yo._list) + 1
1565 yo._set.difference_update(yo._list[key])
1566 yo._list.__delitem__(key)
1568 if loc > 0:
1569 yo._current = len(yo._list)
1570 elif loc == 0 or yo._current >= len(yo._list):
1571 yo._current = len(yo._list) - 1
1572 else:
1573 raise TypeError
1575 if isinstance(key, int):
1576 count = len(yo._list)
1577 if not -count <= key < count:
1578 raise IndexError("Record %d is not in list." % key)
1579 return yo._get_record(*yo._list[key])
1580 elif isinstance(key, slice):
1581 result = DbfList()
1582 result._list[:] = yo._list[key]
1583 result._set.update(result._list)
1584 result._current = 0 if result else -1
1585 return result
1586 else:
1587 raise TypeError
1589 return (table.get_record(recno) for table, recno in yo._list)
1591 return len(yo._list)
1597 if yo._desc:
1598 return "DbfList(%s - %d records)" % (yo._desc, len(yo._list))
1599 else:
1600 return "DbfList(%d records)" % len(yo._list)
1602 if isinstance(other, DbfList):
1603 result = DbfList()
1604 result._list[:] = other._list[:]
1605 result._set = set(other._set)
1606 lost = set()
1607 for item in yo._list:
1608 if item in result._list:
1609 result._set.remove(item)
1610 lost.add(item)
1611 result._list = [item for item in result._list if item not in lost]
1612 result._current = 0 if result else -1
1613 return result
1614 return NotImplemented
1616 if isinstance(other, DbfList):
1617 result = DbfList()
1618 result._list[:] = yo._list[:]
1619 result._set = set(yo._set)
1620 lost = set()
1621 for item in other._list:
1622 if item in result._set:
1623 result._set.remove(item)
1624 lost.add(item)
1625 result._list = [item for item in result._list if item not in lost]
1626 result._current = 0 if result else -1
1627 return result
1628 return NotImplemented
1630 if record is None:
1631 item = table
1632 else:
1633 item = table, record.record_number()
1634 if item not in yo._set:
1635 yo._set.add(item)
1636 yo._list.append(item)
1638 if table is rec_no is None:
1639 table, rec_no = yo._list[yo._current]
1640 return table.get_record(rec_no)
1641 - def append(yo, table, new_record):
1642 yo._maybe_add(table, new_record)
1643 yo._current = len(yo._list) - 1
1645 if yo._list:
1646 yo._current = len(yo._list) - 1
1647 return yo._get_record()
1648 raise DbfError("DbfList is empty")
1650 if yo._current < 0:
1651 raise Bof()
1652 elif yo._current == len(yo._list):
1653 raise Eof()
1654 return yo._get_record()
1655 - def extend(yo, table=None, new_records=None):
1656 if isinstance(new_records, DbfList):
1657 for item in new_records:
1658 yo._maybe_add(item)
1659 else:
1660 for record in new_records:
1661 yo._maybe_add(table, record)
1662 yo._current = len(yo._list) - 1
1663 - def goto(yo, index_number):
1664 if yo._list:
1665 if 0 <= index_number < len(yo._list):
1666 yo._current = index_number
1667 return yo._get_record()
1668 raise DbfError("index %d not in DbfList of %d records" % (index_number, len(yo._list)))
1669 raise DbfError("DbfList is empty")
1670 - def insert(yo, i, table, record):
1671 item = table, record.record_number()
1672 if item not in yo._set:
1673 yo._set.add(item)
1674 yo._list.insert(i, item)
1676 if yo._current < len(yo._list):
1677 yo._current += 1
1678 if yo._current < len(yo._list):
1679 return yo._get_record()
1680 raise Eof()
1681 - def pop(yo, index=None):
1682 loc = yo._current - len(yo._list) + 1
1683 if index is None:
1684 table, recno = yo._list.pop()
1685 yo._set.remove((table, recno))
1686 else:
1687 table, recno = yo._list.pop(index)
1688 yo._set.remove((table, recno))
1689 if loc > 0:
1690 yo._current = len(yo._list)
1691 elif loc == 0 or yo._current >= len(yo._list):
1692 yo._current = len(yo._list) - 1
1693 return yo._get_record(table, recno)
1695 if yo._current >= 0:
1696 yo._current -= 1
1697 if yo._current > -1:
1698 return yo._get_record()
1699 raise Bof()
1703 if yo._list:
1704 yo._current = 0
1705 return yo._get_record()
1706 raise DbfError("DbfList is empty")
1707 - def sort(yo, key=None, reverse=None):
1718 csv.register_dialect('dbf', DbfCsv)
1719
1721 "returns parameter unchanged"
1722 return value
1724 "ensures each tuple is the same length, using filler[-missing] for the gaps"
1725 final = []
1726 for t in tuples:
1727 if len(t) < length:
1728 final.append( tuple([item for item in t] + filler[len(t)-length:]) )
1729 else:
1730 final.append(t)
1731 return tuple(final)
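# _normalize_tuples() pads short tuples from the tail of filler so every entry
# comes out the same length; a tiny illustration (values arbitrary):
def _example_normalize():
    specs = (('name', str.upper), ('age',))
    return _normalize_tuples(tuples=specs, length=2, filler=[_nop])
    # -> (('name', str.upper), ('age', _nop))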
1733 if cp not in code_pages:
1734 for code_page in sorted(code_pages.keys()):
1735 sd, ld = code_pages[code_page]
1736 if cp == sd or cp == ld:
1737 if sd is None:
1738 raise DbfError("Unsupported codepage: %s" % ld)
1739 cp = code_page
1740 break
1741 else:
1742 raise DbfError("Unsupported codepage: %s" % cp)
1743 sd, ld = code_pages[cp]
1744 return cp, sd, ld
1745 -def ascii(new_setting=None):
1752 -def codepage(cp=None):
1753 "get/set default codepage for any new tables"
1754 global default_codepage
1755 cp, sd, ld = _codepage_lookup(cp or default_codepage)
1756 default_codepage = sd
1757 return "%s (LDID: 0x%02x - %s)" % (sd, ord(cp), ld)
1765 version = 'dBase IV w/memos (non-functional)'
1766 _versionabbv = 'db4'
1767 _fieldtypes = {
1768 'C' : {'Type':'Character', 'Retrieve':io.retrieveCharacter, 'Update':io.updateCharacter, 'Blank':str, 'Init':io.addCharacter},
1769 'Y' : {'Type':'Currency', 'Retrieve':io.retrieveCurrency, 'Update':io.updateCurrency, 'Blank':Decimal(), 'Init':io.addVfpCurrency},
1770 'B' : {'Type':'Double', 'Retrieve':io.retrieveDouble, 'Update':io.updateDouble, 'Blank':float, 'Init':io.addVfpDouble},
1771 'F' : {'Type':'Float', 'Retrieve':io.retrieveNumeric, 'Update':io.updateNumeric, 'Blank':float, 'Init':io.addVfpNumeric},
1772 'N' : {'Type':'Numeric', 'Retrieve':io.retrieveNumeric, 'Update':io.updateNumeric, 'Blank':int, 'Init':io.addVfpNumeric},
1773 'I' : {'Type':'Integer', 'Retrieve':io.retrieveInteger, 'Update':io.updateInteger, 'Blank':int, 'Init':io.addVfpInteger},
1774 'L' : {'Type':'Logical', 'Retrieve':io.retrieveLogical, 'Update':io.updateLogical, 'Blank':bool, 'Init':io.addLogical},
1775 'D' : {'Type':'Date', 'Retrieve':io.retrieveDate, 'Update':io.updateDate, 'Blank':Date.today, 'Init':io.addDate},
1776 'T' : {'Type':'DateTime', 'Retrieve':io.retrieveVfpDateTime, 'Update':io.updateVfpDateTime, 'Blank':DateTime.now, 'Init':io.addVfpDateTime},
1777 'M' : {'Type':'Memo', 'Retrieve':io.retrieveMemo, 'Update':io.updateMemo, 'Blank':str, 'Init':io.addMemo},
1778 'G' : {'Type':'General', 'Retrieve':io.retrieveMemo, 'Update':io.updateMemo, 'Blank':str, 'Init':io.addMemo},
1779 'P' : {'Type':'Picture', 'Retrieve':io.retrieveMemo, 'Update':io.updateMemo, 'Blank':str, 'Init':io.addMemo},
1780 '0' : {'Type':'_NullFlags', 'Retrieve':io.unsupportedType, 'Update':io.unsupportedType, 'Blank':int, 'Init':None} }
1781 _memoext = '.dbt'
1782 _memotypes = ('G','M','P')
1783 _memoClass = _VfpMemo
1784 _yesMemoMask = '\x8b'
1785 _noMemoMask = '\x04'
1786 _fixed_fields = ('B','D','G','I','L','M','P','T','Y')
1787 _variable_fields = ('C','F','N')
1788 _character_fields = ('C','M')
1789 _decimal_fields = ('F','N')
1790 _numeric_fields = ('B','F','I','N','Y')
1791 _supported_tables = ('\x04', '\x8b')
1792 _dbfTableHeader = ['\x00'] * 32
1793 _dbfTableHeader[0] = '\x8b'
1794 _dbfTableHeader[10] = '\x01'
1795 _dbfTableHeader[29] = '\x03'
1796 _dbfTableHeader = ''.join(_dbfTableHeader)
1797 _dbfTableHeaderExtra = ''
1798 _use_deleted = True
1800 "dBase III specific"
1801 if yo._meta.header.version() == '\x8b':
1802 try:
1803 yo._meta.memo = yo._memoClass(yo._meta)
1804 except:
1805 yo._meta.dfd.close()
1806 yo._meta.dfd = None
1807 raise
1808 if not yo._meta.ignorememos:
1809 for field in yo._meta.fields:
1810 if yo._meta[field]['type'] in yo._memotypes:
1811 if yo._meta.header.version() != '\x8b':
1812 yo._meta.dfd.close()
1813 yo._meta.dfd = None
1814 raise DbfError("Table structure corrupt: memo fields exist, header declares no memos")
1815 elif not os.path.exists(yo._meta.memoname):
1816 yo._meta.dfd.close()
1817 yo._meta.dfd = None
1818 raise DbfError("Table structure corrupt: memo fields exist without memo file")
1819 break
1820