diff --git a/CollaborativeCoding/load_metric.py b/CollaborativeCoding/load_metric.py
index ea0c574..22a7ea3 100644
--- a/CollaborativeCoding/load_metric.py
+++ b/CollaborativeCoding/load_metric.py
@@ -29,7 +29,7 @@ class MetricWrapper(nn.Module):
     Methods
     -------
     __call__(y_true, y_pred)
-        Passes the true and predicted labels to the metric functions.
+        Passes the true labels and predicted logits to the metric functions.
     getmetrics(str_prefix: str = None)
         Retrieves the dictionary of computed metrics; optionally, all keys can be prefixed with a string.
     resetmetric()
@@ -40,10 +40,13 @@ class MetricWrapper(nn.Module):
     >>> from CollaborativeCoding import MetricWrapperProposed
     >>> metrics = MetricWrapperProposed(2, "entropy", "f1", "precision")
     >>> y_true = [0, 1, 0, 1]
-    >>> y_pred = [0, 1, 1, 0]
+    >>> y_pred = [[0.8, -1.9],
+    ...           [0.1, 9.0],
+    ...           [-1.9, -0.1],
+    ...           [1.9, 1.8]]
     >>> metrics(y_true, y_pred)
     >>> metrics.getmetrics()
-    {'entropy': 0.6931471805599453, 'f1': 0.5, 'precision': 0.5}
+    {'entropy': 0.3292665, 'f1': 0.5, 'precision': 0.5}
     >>> metrics.resetmetric()
     >>> metrics.getmetrics()
     {'entropy': [], 'f1': [], 'precision': []}
diff --git a/README.md b/README.md
index d16966d..fc1d2ad 100644
--- a/README.md
+++ b/README.md
@@ -120,3 +120,22 @@ The table below presents the detailed results, showcasing the model's performanc
 | Validation | 1.019 | 0.995 | 0.680 | 0.680 | 0.680 | 0.680 |
 | Test       | 1.196 | 0.985 | 0.634 | 0.634 | 0.634 | 0.634 |
 
+## Citing
+Please consider citing this repository if you end up using it for your work.
+Several citation methods can be found under the "About" section.
+For BibTeX citation, please use
+```
+@software{Thrun_Collaborative_Coding_Exam_2025,
+author = {Thrun, Solveig and Salomonsen, Christian and Størdal, Magnus and Zavadil, Jan and Mylius-Kroken, Johan},
+month = feb,
+title = {{Collaborative Coding Exam}},
+url = {https://github.com/SFI-Visual-Intelligence/Collaborative-Coding-Exam},
+version = {1.1.0},
+year = {2025}
+}
+```
+
+For APA, please use
+```
+Thrun, S., Salomonsen, C., Størdal, M., Zavadil, J., & Mylius-Kroken, J. (2025). Collaborative Coding Exam (Version 1.1.0) [Computer software]. https://github.com/SFI-Visual-Intelligence/Collaborative-Coding-Exam
+```
\ No newline at end of file
diff --git a/doc/Magnus_page.md b/doc/Magnus_page.md
index d0c4247..0be7933 100644
--- a/doc/Magnus_page.md
+++ b/doc/Magnus_page.md
@@ -67,8 +67,8 @@
 It should be noted that a lot of our decisions came from a top-down perspective.
 All in all, we've made sure you don't really need to interact with the code outside setting up the correct arguments for the run, which is great for consistency.
 
-# Challenges
-## Running someone elses code
+## Challenges
+### Running someone else's code
 This section answers the question of what I found easy / difficult running another person's code.
 I found it quite easy to run others' code. We had quite good tests, and once every test passed, I only had one error with the F1 score not handling an unexpected edge case. To fix this I raised an issue, and it was fixed shortly after.
 
 One thing I did find a bit difficult was when people would change integral parts of the codebase.
 The issues mentioned above also led to a week or so where there was always a test failing, and the person whose code was failing did not have time to work on it for a few days.
 
-## Someone running my code
+### Someone running my code
 This section answers the question of what I found easy / difficult having someone run my code.
 I did not experience anyone having issues with my code.
 After I fixed all the issues and tests related to my code, it seems to have run fine, and no problems have been brought to my attention.
 
-# Tools
+## Tools
 This section answers the question of which tools from the course I used during the home exam.
 For this exam I used quite a few of them.
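
For context on the `load_metric.py` docstring change above: `y_pred` in the doctest is now a matrix of per-class logits rather than a list of hard labels. Below is a minimal sketch of how a wrapper can serve both kinds of metrics from logits; it assumes a softmax/argmax reduction, and the names and the exact entropy definition are illustrative rather than taken from the repository.

```
# Illustrative sketch only; not the repository's actual MetricWrapper internals.
import torch
import torch.nn.functional as F

y_true = torch.tensor([0, 1, 0, 1])
y_pred = torch.tensor([[0.8, -1.9],
                       [0.1, 9.0],
                       [-1.9, -0.1],
                       [1.9, 1.8]])

# Distribution-based metrics (e.g. entropy) can use the softmax probabilities.
probs = F.softmax(y_pred, dim=1)
mean_entropy = -(probs * probs.log()).sum(dim=1).mean()

# Label-based metrics (f1, precision) reduce the logits to hard predictions.
hard_preds = y_pred.argmax(dim=1)  # tensor([0, 1, 1, 0])
```

Note that the argmax of the example logits reproduces the hard labels `[0, 1, 1, 0]` from the old doctest, which is consistent with `f1` and `precision` staying at 0.5 in the diff while only the entropy value changes.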
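
The `getmetrics(str_prefix: str = None)` context line in the same hunk documents an optional key prefix. A hedged usage sketch, reusing the doctest values above (the prefixed call and its output are hypothetical, inferred from the docstring's description):

```
>>> metrics(y_true, y_pred)
>>> metrics.getmetrics("val_")
{'val_entropy': 0.3292665, 'val_f1': 0.5, 'val_precision': 0.5}
```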