
Commit 1e58288

start rebasing the generated bindings (EasyRPG#205)
1 parent 04519aa commit 1e58288

File tree

9 files changed (+598 −0 lines)


.gitignore

Lines changed: 1 addition & 0 deletions
@@ -28,6 +28,7 @@ Makefile*
 /bin/platforms
 *.cbp
 translations/
+/src/binding/generated/

 # flatpak
 /.flatpak-builder

binding_generator/README.md

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
# EasyRPG Editor binding code generator

Files in the `src/binding/generated` subdirectory are regenerated
automatically by running the `generate.py` script.

The source files are generated from the templates in the `templates`
subfolder. Pass the path to the liblcf `generator/csv` directory as the
first argument.

## Requirements

* Python 3 interpreter.
* Jinja2 template engine.
* pandas data analysis library.
* CSV files can be edited with any text editor or, at your option, any
  spreadsheet editor.

## Usage

1. Open one of the .csv files in the `csv` subdirectory to edit or add new
   data, then save your changes.
2. Run the `generate.py` script from the `generator` folder (see the sketch
   below).
3. Add any newly created .cpp and .h files to the project files if needed.
4. Recompile EasyRPG Editor.
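
A minimal invocation sketch, not part of this commit; the liblcf path and the
`binding_generator` working directory are assumptions, and `subprocess` is used
only to keep the example in Python:

```python
# Hypothetical wrapper around generate.py; adjust the liblcf path to your clone.
import subprocess
import sys

liblcf_csv = "/path/to/liblcf/generator/csv"  # assumption: local liblcf checkout

subprocess.run(
    [sys.executable, "generate.py", liblcf_csv],  # generate.py reads sys.argv[1]
    cwd="binding_generator",                      # assumption: run from the repo root
    check=True,
)
```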

binding_generator/generate.py

Lines changed: 364 additions & 0 deletions
@@ -0,0 +1,364 @@
#!/usr/bin/env python3
# Regenerates the binding sources (.h/.cpp) in src/binding/generated from the
# liblcf CSV descriptions (see README.md in this directory).

import pandas as pd
import numpy as np
import sys
import os
import re
import shutil
import filecmp
from collections import namedtuple, OrderedDict
from itertools import groupby
import operator

from jinja2 import Environment, PackageLoader, select_autoescape
env = Environment(
    loader=PackageLoader('templates', ''),
    autoescape=select_autoescape([]),
    keep_trailing_newline=True,
    lstrip_blocks=True
)

gen_dir = os.path.dirname(os.path.abspath(__file__))
# Path to the liblcf generator/csv directory, passed as the first argument.
csv_dir = sys.argv[1]
dest_dir = os.path.abspath(os.path.join(gen_dir, "..", "src", "binding", "generated"))
tmp_dir = os.path.join(dest_dir, "tmp")

# Maps scalar liblcf field types to the Qt types used by the bindings.
qt_types = {
    'Boolean': 'bool',
    'Double': 'double',
    'UInt8': 'int',
    'UInt16': 'int',
    'UInt32': 'int',
    'Int8': 'int',
    'Int16': 'int',
    'Int32': 'int',
    'String': 'QString',
    'DBString': 'QString',
    'DBBitArray': 'QVector<bool>',
}

# Additional Jinja 2 functions
def qt_type(ty, prefix=True):
    if ty in qt_types:
        return qt_types[ty]

    if ty == "DatabaseVersion":
        return 'int'

    if ty == "EmptyBlock":
        return 'void'

    m = re.match(r'Count<(.*)>', ty)
    if m:
        return qt_type(m.group(1), prefix)

    m = re.match(r'Array<(.*):(.*)>', ty)
    if m:
        return 'ArrayAdapter*'

    m = re.match(r'(Vector|Array)<(.*)>', ty)
    if m:
        if type_is_struct(m.group(2)):
            return 'ArrayAdapter*'
        return 'QVector<%s>' % qt_type(m.group(2), prefix)

    m = re.match(r'DBArray<(.*)>', ty)
    if m:
        return 'QVector<%s>' % qt_type(m.group(1), prefix)

    m = re.match(r'Ref<(.*):(.*)>', ty)
    if m:
        return qt_type(m.group(2), prefix)

    m = re.match(r'Ref<(.*)>', ty)
    if m:
        return 'int'

    m = re.match(r'Enum<(.*)>', ty)
    if m:
        return 'int'

    m = re.match(r'(.*)_Flags$', ty)
    if m:
        ty = m.expand(r'\1::Flags')
        if prefix:
            ty = "Binding::" + ty + "*"
        return ty

    if prefix:
        ty = "Binding::" + ty + "*"

    return ty

def inner_type(ty):
    m = re.match(r'.*?<([^:]+).*>', ty)
    if m:
        return m.group(1)
    return ty

def num_flags(flag):
    return len(flag)

def filter_structs_without_codes(structs):
    for struct in structs:
        if all(f.code for f in sfields[struct.name]):
            yield struct
# End of Jinja 2 functions

int_types = {
    'UInt8': 'uint8_t',
    'UInt16': 'uint16_t',
    'UInt32': 'uint32_t',
    'Int16': 'int16_t',
    'Int32': 'int32_t'
}

def struct_headers(ty, header_map):
    m = re.match(r'Ref<(.*):(.*)>', ty)
    if m:
        return struct_headers(m.group(2), header_map)

    m = re.match(r'Array<(.*):(.*)>', ty)
    if m:
        return struct_headers(m.group(1), header_map)

    m = re.match(r'(Vector|Array)<(.*)>', ty)
    if m:
        return struct_headers(m.group(2), header_map)

    header = header_map.get(ty)
    if header is not None:
        return ['"%s.h"' % header]

    if ty in ['Parameters', 'Equipment', 'EventCommand', 'MoveCommand', 'Rect', 'TreeMap']:
        return ['"%s.h"' % ty.lower()]

    return []

def merge_dicts(dicts):
    # Merges multiple dicts into one
    out_dict = dicts[0]

    for d in dicts[1:]:
        for k, v in d.items():
            if k in out_dict:
                # Append new values
                for vv in v:
                    out_dict[k].append(vv)
            else:
                # Insert whole key
                out_dict[k] = v

    return out_dict

def process_file(filename, namedtup):
    # Mapping is: All elements of the line grouped by the first column

    path = os.path.join(csv_dir, filename)
    df = pd.read_csv(path, comment='#', dtype=str)
    df = df.fillna("")

    lines = [list(r) for _i, r in df.iterrows()]

    result = OrderedDict()
    for k, g in groupby(lines, operator.itemgetter(0)):
        result[k] = list(map(lambda x: namedtup(*x[1:]), list(g)))

    return result

def get_structs(*filenames):
    Struct = namedtuple("Struct", "name base hasid")

    results = list(map(lambda x: process_file(x, Struct), filenames))

    processed_result = OrderedDict()

    for k, struct in merge_dicts(results).items():
        processed_result[k] = []

        for elem in struct:
            elem = Struct(elem.name, elem.base, bool(int(elem.hasid)) if elem.hasid else None)
            processed_result[k].append(elem)

    processed_flat = []
    for filetype, struct in processed_result.items():
        for elem in struct:
            processed_flat.append(elem)

    return processed_result, processed_flat

def get_fields(*filenames):
    Field = namedtuple("Field", "name size type code default presentifdefault is2k3 comment")

    results = list(map(lambda x: process_file(x, Field), filenames))

    processed_result = OrderedDict()

    for k, field in merge_dicts(results).items():
        processed_result[k] = []
        for elem in field:
            if elem.size == 't':
                continue
            if not elem.type or elem.type == "EmptyBlock":
                continue
            elem = Field(
                elem.name,
                True if elem.size == 't' else False,
                elem.type,
                0 if elem.code == '' else int(elem.code, 0),
                elem.default,
                elem.presentifdefault,
                elem.is2k3,
                elem.comment)
            processed_result[k].append(elem)

    return processed_result

def get_enums(*filenames):
    results = list(map(lambda x: process_file(x, namedtuple("Enum", "entry value index")), filenames))
    new_result = OrderedDict()

    # Additional processing to group by the Enum Entry
    # Results in e.g. EventCommand -> Code -> List of (Name, Index)
    for k, v in merge_dicts(results).items():
        new_result[k] = OrderedDict()
        for kk, gg in groupby(v, operator.attrgetter("entry")):
            new_result[k][kk] = list(map(lambda x: (x.value, x.index), gg))

    return new_result

def get_flags(*filenames):
    results = list(map(lambda x: process_file(x, namedtuple("Flag", "field is2k3")), filenames))
    return merge_dicts(results)

def get_functions(*filenames):
    Function = namedtuple("Function", "method static headers")

    results = list(map(lambda x: process_file(x, Function), filenames))

    processed_result = OrderedDict()

    for k, field in merge_dicts(results).items():
        processed_result[k] = []
        for elem in field:
            elem = Function(
                elem.method,
                elem.static == 't',
                elem.headers)
            processed_result[k].append(elem)

    return processed_result

def get_constants(filename='constants.csv'):
    return process_file(filename, namedtuple("Constant", "name type value comment"))

def type_is_db_string(ty):
    return ty == 'DBString'

def type_is_string(ty):
    return ty == 'String'

def type_is_array(ty):
    return re.match(r'(Vector|Array|DBArray)<(.*)>', ty) or ty == "DBBitArray"

def type_is_struct(ty):
    return ty in [x.name for x in structs_flat]

def type_can_write(ty):
    if qt_type(ty) in ["bool", "int", "double", "QString"]:
        return True
    return type_is_array(ty) and not type_is_array_of_struct(ty)

def type_is_array_of_struct(ty):
    m = re.match(r'(Vector|Array|DBArray)<(.*)>', ty)
    return m and type_is_struct(m.group(2).split(":")[0])

def openToRender(path):
    subdir = os.path.dirname(path)
    if not os.path.exists(subdir):
        os.makedirs(subdir)
    return open(path, 'w')

def generate():
    # Render every struct into tmp_dir first, then copy only files whose
    # content actually changed so unchanged generated sources keep their
    # timestamps and do not trigger needless rebuilds.
    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)

    for filetype, structlist in structs.items():
        for struct in structlist:
            filename = struct.name.lower()

            filepath = os.path.join(tmp_dir, '%s.h' % filename)
            with openToRender(filepath) as f:
                f.write(rpg_header_tmpl.render(
                    struct_name=struct.name,
                    struct_base=struct.base,
                    has_id=struct.hasid
                ))

            filepath = os.path.join(tmp_dir, '%s.cpp' % filename)
            with openToRender(filepath) as f:
                f.write(rpg_source_tmpl.render(
                    struct_name=struct.name,
                    struct_base=struct.base,
                    has_id=struct.hasid,
                    filename=filename
                ))

    for dirname, subdirlist, filelist in os.walk(tmp_dir, topdown=False):
        subdir = os.path.relpath(dirname, tmp_dir)

        for tmp_file in filelist:
            tmp_path = os.path.join(tmp_dir, subdir, tmp_file)
            dest_path = os.path.join(dest_dir, subdir, tmp_file)
            dest_subdir = os.path.dirname(dest_path)
            if not os.path.exists(dest_subdir):
                os.mkdir(dest_subdir)
            if not (os.path.exists(dest_path) and filecmp.cmp(tmp_path, dest_path)):
                shutil.copyfile(tmp_path, dest_path)
            os.remove(tmp_path)
        os.rmdir(os.path.join(dirname))

def main(argv):
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)

    global structs, structs_flat, sfields, enums, flags, functions, constants
    global chunk_tmpl, lcf_struct_tmpl, rpg_header_tmpl, rpg_source_tmpl, flags_tmpl, enums_tmpl, fwd_tmpl, fwd_struct_tmpl

    structs, structs_flat = get_structs('structs.csv', 'structs_easyrpg.csv')
    sfields = get_fields('fields.csv', 'fields_easyrpg.csv')
    enums = get_enums('enums.csv', 'enums_easyrpg.csv')
    flags = get_flags('flags.csv')
    functions = get_functions('functions.csv')
    constants = get_constants()

    # Setup Jinja
    env.filters["qt_type"] = qt_type
    env.filters["inner_type"] = inner_type
    env.filters["struct_has_code"] = filter_structs_without_codes
    env.filters["num_flags"] = num_flags
    env.tests['is_db_string'] = type_is_db_string
    env.tests['is_string'] = type_is_string
    env.tests['is_array'] = type_is_array
    env.tests['is_array_of_struct'] = type_is_array_of_struct
    env.tests['is_struct'] = type_is_struct
    env.tests['can_write'] = type_can_write

    globals = dict(
        structs=structs,
        structs_flat=structs_flat,
        fields=sfields,
        flags=flags,
        enums=enums,
        functions=functions,
        constants=constants,
    )

    rpg_header_tmpl = env.get_template('rpg_header.tmpl', globals=globals)
    rpg_source_tmpl = env.get_template('rpg_source.tmpl', globals=globals)

    generate()

if __name__ == '__main__':
    main(sys.argv)
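
For illustration, a simplified, self-contained restatement of a few of the
`qt_type()` rules above (not the committed code; it omits the `Count<>`,
struct-vector, and `_Flags` cases) together with the mappings it is expected
to produce:

```python
# Simplified re-statement of part of qt_type() above, for illustration only.
import re

SCALARS = {"Boolean": "bool", "Double": "double", "Int16": "int", "String": "QString"}

def qt_type_demo(ty, prefix=True):
    if ty in SCALARS:
        return SCALARS[ty]
    if re.match(r"Array<(.*):(.*)>", ty):        # indexed struct arrays use an adapter
        return "ArrayAdapter*"
    m = re.match(r"DBArray<(.*)>", ty)           # flat arrays of scalars
    if m:
        return "QVector<%s>" % qt_type_demo(m.group(1), prefix)
    if re.match(r"(Enum|Ref)<[^:]*>$", ty):      # enums and plain ID refs become ints
        return "int"
    return "Binding::" + ty + "*" if prefix else ty  # anything else: wrapped binding type

assert qt_type_demo("Int16") == "int"
assert qt_type_demo("DBArray<Int16>") == "QVector<int>"
assert qt_type_demo("Array<Item:Item>") == "ArrayAdapter*"
assert qt_type_demo("Enum<Code>") == "int"
assert qt_type_demo("Music") == "Binding::Music*"
```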
