SoLiXG:Greece’s Recovery and Resilience Plan
Review of Greek policy
Abstract
Notes
Word Frequency
Most frequent words
GR | EN |
---|---|
('κόστους', 1013) | ('costs', 1013) ('investments', 912) ('Part', 830) ('reforms', 722) ('Recovery', 673) ('(ID:', 638) ('Investment', 622) ('Implementation', 615) ('Plan', 612) ('Resilience', 556) ('development', 497) ('cost', 491) ('Axis', 484) ('Description', 476) ('reforms', 445) ('labour', 437) ('system', 416) ('sector', 409) ('improvement', 371) ('Axis', 355) ('Complementarity', 353) ('data', 334) ('reform', 329) ('Framework', 319) ('Increase', 313) ('services', 313) ('Plan', 313) ('target', 310) ('Greece', 306) ('includes', 306) ('aid', 299) ('Reform', 295) ('Plan', 293) ('upgrade', 285) ('Investment', 285) ('Purchase', 279) ('also', 277) ('total', 275) ('digital', 266) ('In addition,', 265) ('digital', 251) ('investment', 249) ('economy', 248) ('project', 246) ('evaluation', 240) ('projects', 239) ('EU', 237) ('EU', 237) ('objectives', 234) ('services', 232) ('system', 231) ('project', 230) ('creation', 230) ('projects', 229) ('analysis', 225) ('transformation', 224) ('systems', 224) ('Integration', 224) ('related', 222) ('assessment', 218) ('Milestone', 217) ('country', 216) ('reduction', 216) ('promotion', 214) ('target', 211) ('use', 208) ('energy', 208) ('quarter', 207) ('concerns', 205) ('basis', 203) ('development', 201) ('data', 200) ('new', 199) ('management', 198) ('Digital', 197) ('Approach', 197) ('Challenges', 196) ('must', 194) ('skills', 190) ('business', 189) ('tackling', 188) ('analysis', 188) ('related', 184) ('information', 180) ('training', 177) ('policies', 177) ('protection', 177) ('provision', 176) ('Objective', 174) ('transformation', 174) ('market', 173) ('included', 172) ('2020', 170) ('control', 170) ('GDP', 167) ('authorities', 165) ('health', 164) ('expected', 163) ('targets', 161) ('according to', 161) ('Fund', 160) ('education', 160) ('implementation', 160) |
Script
<syntaxhighlight lang="python">
# Script to find the most frequent words in a text file (the Greek policy text).
with open('gr-policy.txt', 'r', encoding='utf-8') as lines:
    text = lines.read()
text_list = text.replace('\n', ' ').split(".")  # split into sentences

sep_words = []  # every word in the text, in order of appearance
all_freq = {}   # word -> number of occurrences

with open("output.txt", "a", encoding='utf-8') as f:
    # Collect the individual words from every sentence.
    for l in text_list:
        for w in l.split():
            sep_words.append(w)
    # Count how often each word occurs.
    for word in sep_words:
        freq = sep_words.count(word)
        all_freq.update({word: freq})
    # Sort by frequency, highest first, and write one (word, count) tuple per line.
    new_list = sorted(all_freq.items(), key=lambda item: item[1], reverse=True)
    print(*new_list, sep="\n", file=f)
</syntaxhighlight>
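
The counting step above rescans the whole word list once per word, which gets slow on long texts. A minimal alternative sketch, assuming the same input file (gr-policy.txt), output file (output.txt) and tokenisation as the script above, uses collections.Counter from the Python standard library to count in a single pass:

<syntaxhighlight lang="python">
from collections import Counter

with open('gr-policy.txt', 'r', encoding='utf-8') as src:
    text = src.read()

# Same tokenisation as above: treat line breaks and full stops as separators.
words = text.replace('\n', ' ').replace('.', ' ').split()

counts = Counter(words)  # word -> number of occurrences, counted in one pass

with open('output.txt', 'a', encoding='utf-8') as f:
    # most_common() yields (word, count) pairs sorted by descending count.
    for pair in counts.most_common():
        print(pair, file=f)
</syntaxhighlight>

Counter.most_common() already returns (word, count) pairs sorted by descending count, so no separate sorting step is needed and the output format (one tuple per line) stays the same as in the original script.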