Commit

feat: better serialization. Closes #57
d-krupke committed Oct 24, 2024
1 parent a7d27cd commit 13e8207
Showing 3 changed files with 98 additions and 19 deletions.
35 changes: 25 additions & 10 deletions 05_parameters.md
@@ -499,20 +499,35 @@ load the model on a different machine and run the solver.
 ```python
 from ortools.sat.python import cp_model
 from google.protobuf import text_format
-
-
-def export_model(model: cp_model.CpModel, filename: str):
-    with open(filename, "w") as file:
-        file.write(text_format.MessageToString(model.Proto()))
-
-
-def import_model(filename: str) -> cp_model.CpModel:
+from pathlib import Path
+
+def _detect_binary_mode(filename: str) -> bool:
+    if filename.endswith((".txt", ".pbtxt", ".pb.txt")):
+        return False
+    if filename.endswith((".pb", ".bin", ".proto.bin", ".dat")):
+        return True
+    raise ValueError(f"Unknown extension for file: {filename}")
+
+def export_model(model: cp_model.CpModel, filename: str, binary: bool | None = None):
+    binary = _detect_binary_mode(filename) if binary is None else binary
+    if binary:
+        Path(filename).write_bytes(model.Proto().SerializeToString())
+    else:
+        Path(filename).write_text(text_format.MessageToString(model.Proto()))
+
+def import_model(filename: str, binary: bool | None = None) -> cp_model.CpModel:
+    binary = _detect_binary_mode(filename) if binary is None else binary
     model = cp_model.CpModel()
-    with open(filename, "r") as file:
-        text_format.Parse(file.read(), model.Proto())
+    if binary:
+        model.Proto().ParseFromString(Path(filename).read_bytes())
+    else:
+        text_format.Parse(Path(filename).read_text(), model.Proto())
     return model
 ```

The binary mode is more efficient and should be used for large models. The text
mode is human-readable and easier to share and compare.
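
A minimal usage sketch (not part of the commit), assuming the `export_model` and
`import_model` helpers from the diff above are in scope; the model and file names
are made up for illustration:

```python
from ortools.sat.python import cp_model

# Hypothetical example model, just to have something to serialize.
model = cp_model.CpModel()
x = model.NewIntVar(0, 100, "x")
y = model.NewIntVar(0, 100, "y")
model.Add(x + y <= 42)
model.Maximize(2 * x + y)

# The extension selects the format: .pb -> binary, .pbtxt -> text.
export_model(model, "model.pb")
export_model(model, "model.pbtxt")

# Re-importing either file should reproduce the same underlying proto.
restored = import_model("model.pb")
assert model.Proto() == restored.Proto()
```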

### Hints

If you have a good intuition about how the solution might look—perhaps from
33 changes: 24 additions & 9 deletions README.md
@@ -3366,20 +3366,35 @@ load the model on a different machine and run the solver.
 ```python
 from ortools.sat.python import cp_model
 from google.protobuf import text_format
+from pathlib import Path
 
-
-def export_model(model: cp_model.CpModel, filename: str):
-    with open(filename, "w") as file:
-        file.write(text_format.MessageToString(model.Proto()))
-
-
-def import_model(filename: str) -> cp_model.CpModel:
+def _detect_binary_mode(filename: str) -> bool:
+    if filename.endswith((".txt", ".pbtxt", ".pb.txt")):
+        return False
+    if filename.endswith((".pb", ".bin", ".proto.bin", ".dat")):
+        return True
+    raise ValueError(f"Unknown extension for file: {filename}")
+
+def export_model(model: cp_model.CpModel, filename: str, binary: bool | None = None):
+    binary = _detect_binary_mode(filename) if binary is None else binary
+    if binary:
+        Path(filename).write_bytes(model.Proto().SerializeToString())
+    else:
+        Path(filename).write_text(text_format.MessageToString(model.Proto()))
+
+def import_model(filename: str, binary: bool | None = None) -> cp_model.CpModel:
+    binary = _detect_binary_mode(filename) if binary is None else binary
     model = cp_model.CpModel()
-    with open(filename, "r") as file:
-        text_format.Parse(file.read(), model.Proto())
+    if binary:
+        model.Proto().ParseFromString(Path(filename).read_bytes())
+    else:
+        text_format.Parse(Path(filename).read_text(), model.Proto())
     return model
 ```

The binary mode is more efficient and should be used for large models. The text
mode is human-readable and easier to share and compare.
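
As a sketch of the receiving side (not part of the commit), assuming `import_model`
from the snippet above is available and a binary file named `model.pb` was produced
elsewhere; the time limit is arbitrary:

```python
from ortools.sat.python import cp_model

# Load the serialized model on the other machine and run the solver on it.
model = import_model("model.pb")

solver = cp_model.CpSolver()
solver.parameters.max_time_in_seconds = 60
status = solver.Solve(model)

if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
    print("objective:", solver.ObjectiveValue())
else:
    print("no solution found:", solver.StatusName(status))
```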

### Hints

If you have a good intuition about how the solution might look—perhaps from
49 changes: 49 additions & 0 deletions tests/test_serialization.py
@@ -0,0 +1,49 @@
import tempfile


def test_serialization():
    from ortools.sat.python import cp_model
    from google.protobuf import text_format
    from pathlib import Path

    def _detect_binary_mode(filename: str) -> bool:
        if filename.endswith((".txt", ".pbtxt", ".pb.txt")):
            return False
        if filename.endswith((".pb", ".bin", ".proto.bin", ".dat")):
            return True
        raise ValueError(f"Unknown extension for file: {filename}")

    def export_model(model: cp_model.CpModel, filename: str, binary: bool | None = None):
        binary = _detect_binary_mode(filename) if binary is None else binary
        if binary:
            Path(filename).write_bytes(model.Proto().SerializeToString())
        else:
            Path(filename).write_text(text_format.MessageToString(model.Proto()))

    def import_model(filename: str, binary: bool | None = None) -> cp_model.CpModel:
        binary = _detect_binary_mode(filename) if binary is None else binary
        model = cp_model.CpModel()
        if binary:
            model.Proto().ParseFromString(Path(filename).read_bytes())
        else:
            text_format.Parse(Path(filename).read_text(), model.Proto())
        return model

    model = cp_model.CpModel()
    x = [model.NewIntVar(0, 10, f"x{i}") for i in range(10)]
    model.add(sum(x) <= 20)
    model.maximize(sum(i * x[i] for i in range(10)))

    with tempfile.TemporaryDirectory() as tmpdir:
        export_model(model, f"{tmpdir}/model.pb")
        model2 = import_model(f"{tmpdir}/model.pb")
        assert model.Proto() == model2.Proto()
        export_model(model, f"{tmpdir}/model.txt", binary=False)
        model3 = import_model(f"{tmpdir}/model.txt", binary=False)
        assert model.Proto() == model3.Proto()
        export_model(model, f"{tmpdir}/model.pbtxt", binary=False)
        model4 = import_model(f"{tmpdir}/model.pbtxt", binary=False)
        assert model.Proto() == model4.Proto()
        export_model(model, f"{tmpdir}/model.pb.txt", binary=True)
        model5 = import_model(f"{tmpdir}/model.pb.txt", binary=True)
        assert model.Proto() == model5.Proto()
